2024-11-11 12:41:30,681 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-11 12:41:30,699 main DEBUG Took 0.015117 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-11 12:41:30,699 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-11 12:41:30,700 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-11 12:41:30,701 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-11 12:41:30,703 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,713 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-11 12:41:30,730 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,732 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,733 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,734 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,734 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,735 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,736 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,736 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,737 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,737 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,738 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,739 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,740 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,740 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-11 12:41:30,741 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,741 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,742 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,742 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,743 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,743 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,744 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,744 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,745 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,745 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-11 12:41:30,746 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,746 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-11 12:41:30,748 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-11 12:41:30,750 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-11 12:41:30,753 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-11 12:41:30,753 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-11 12:41:30,755 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-11 12:41:30,755 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-11 12:41:30,767 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-11 12:41:30,774 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-11 12:41:30,776 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-11 12:41:30,777 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-11 12:41:30,777 main DEBUG createAppenders(={Console}) 2024-11-11 12:41:30,780 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 initialized 2024-11-11 12:41:30,780 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 2024-11-11 12:41:30,781 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@7fb4f2a9 OK. 2024-11-11 12:41:30,781 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-11 12:41:30,782 main DEBUG OutputStream closed 2024-11-11 12:41:30,782 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-11 12:41:30,782 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-11 12:41:30,783 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@54e1c68b OK 2024-11-11 12:41:30,862 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-11 12:41:30,865 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-11 12:41:30,866 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-11 12:41:30,867 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-11 12:41:30,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-11 12:41:30,868 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-11 12:41:30,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-11 12:41:30,869 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-11 12:41:30,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-11 12:41:30,870 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-11 12:41:30,871 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-11 12:41:30,871 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-11 12:41:30,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-11 12:41:30,872 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-11 12:41:30,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-11 12:41:30,873 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-11 12:41:30,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-11 12:41:30,874 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-11 12:41:30,877 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-11 12:41:30,877 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@7dda48d9) with optional ClassLoader: null 2024-11-11 12:41:30,878 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-11 12:41:30,879 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@7dda48d9] started OK. 2024-11-11T12:41:31,118 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8 2024-11-11 12:41:31,121 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-11 12:41:31,121 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
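The entries that follow show HBaseTestingUtility standing up a single-node minicluster (1 master, 1 region server, 1 datanode, 1 ZooKeeper server) for org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy. For reference, a minimal sketch of the kind of JUnit class that drives this startup sequence, assuming the stock HBase 2.x test utilities (the class name here is illustrative, not the test from this run):

import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.StartMiniClusterOption;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.ClassRule;

public class ExampleMiniClusterTest {
  // Category-based timeout rule; the log below reports "timeout: 13 mins" for this run's test class.
  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
      HBaseClassTestRule.forClass(ExampleMiniClusterTest.class);

  private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();

  @BeforeClass
  public static void setUp() throws Exception {
    // Mirrors the StartMiniClusterOption printed in the log:
    // numMasters=1, numRegionServers=1, numDataNodes=1, numZkServers=1.
    TEST_UTIL.startMiniCluster(StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(1)
        .numZkServers(1)
        .build());
  }

  @AfterClass
  public static void tearDown() throws Exception {
    // Tears down the HBase, HDFS and ZooKeeper daemons whose startup is logged below.
    TEST_UTIL.shutdownMiniCluster();
  }
}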
2024-11-11T12:41:31,131 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.TestAcidGuaranteesWithBasicPolicy timeout: 13 mins 2024-11-11T12:41:31,152 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=1, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-11T12:41:31,156 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f, deleteOnExit=true 2024-11-11T12:41:31,157 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-11-11T12:41:31,158 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/test.cache.data in system properties and HBase conf 2024-11-11T12:41:31,159 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.tmp.dir in system properties and HBase conf 2024-11-11T12:41:31,160 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.log.dir in system properties and HBase conf 2024-11-11T12:41:31,161 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-11T12:41:31,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-11T12:41:31,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-11-11T12:41:31,294 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-11T12:41:31,413 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-11T12:41:31,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-11T12:41:31,419 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-11T12:41:31,420 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-11T12:41:31,421 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T12:41:31,421 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-11T12:41:31,422 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-11T12:41:31,423 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-11T12:41:31,423 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T12:41:31,424 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-11T12:41:31,424 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/nfs.dump.dir in system properties and HBase conf 2024-11-11T12:41:31,425 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/java.io.tmpdir in system properties and HBase conf 2024-11-11T12:41:31,425 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-11T12:41:31,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-11T12:41:31,426 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-11T12:41:32,533 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-11T12:41:32,671 INFO [Time-limited test {}] log.Log(170): Logging initialized @3085ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-11T12:41:32,779 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T12:41:32,867 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T12:41:32,894 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T12:41:32,895 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T12:41:32,897 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-11T12:41:32,919 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T12:41:32,923 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.log.dir/,AVAILABLE} 2024-11-11T12:41:32,924 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T12:41:33,191 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@b03fcff{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/java.io.tmpdir/jetty-localhost-46343-hadoop-hdfs-3_4_1-tests_jar-_-any-7200347998788043971/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T12:41:33,198 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:46343} 2024-11-11T12:41:33,199 INFO [Time-limited test {}] server.Server(415): Started @3613ms 2024-11-11T12:41:33,730 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-11T12:41:33,738 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-11T12:41:33,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-11T12:41:33,742 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-11T12:41:33,743 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-11T12:41:33,744 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.log.dir/,AVAILABLE} 2024-11-11T12:41:33,745 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-11T12:41:33,914 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1f79ec76{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/java.io.tmpdir/jetty-localhost-43755-hadoop-hdfs-3_4_1-tests_jar-_-any-17653597140111242620/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T12:41:33,916 INFO [Time-limited 
test {}] server.AbstractConnector(333): Started ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:43755} 2024-11-11T12:41:33,916 INFO [Time-limited test {}] server.Server(415): Started @4331ms 2024-11-11T12:41:34,004 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-11T12:41:34,776 WARN [Thread-72 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data1/current/BP-304736114-172.17.0.3-1731328892201/current, will proceed with Du for space computation calculation, 2024-11-11T12:41:34,785 WARN [Thread-73 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data2/current/BP-304736114-172.17.0.3-1731328892201/current, will proceed with Du for space computation calculation, 2024-11-11T12:41:34,880 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-11T12:41:34,967 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ca2cb2dc5c78421 with lease ID 0x3fbe7d7da6628391: Processing first storage report for DS-7ced9a19-f4c6-47e1-9a5b-c424a866a0af from datanode DatanodeRegistration(127.0.0.1:44919, datanodeUuid=b74d14b0-0b0d-4c6b-bf00-ddaeb3d8be59, infoPort=34815, infoSecurePort=0, ipcPort=44429, storageInfo=lv=-57;cid=testClusterID;nsid=946589829;c=1731328892201) 2024-11-11T12:41:34,968 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ca2cb2dc5c78421 with lease ID 0x3fbe7d7da6628391: from storage DS-7ced9a19-f4c6-47e1-9a5b-c424a866a0af node DatanodeRegistration(127.0.0.1:44919, datanodeUuid=b74d14b0-0b0d-4c6b-bf00-ddaeb3d8be59, infoPort=34815, infoSecurePort=0, ipcPort=44429, storageInfo=lv=-57;cid=testClusterID;nsid=946589829;c=1731328892201), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-11T12:41:34,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ca2cb2dc5c78421 with lease ID 0x3fbe7d7da6628391: Processing first storage report for DS-8b0d7a07-5843-40e1-b6fd-faf101478f2c from datanode DatanodeRegistration(127.0.0.1:44919, datanodeUuid=b74d14b0-0b0d-4c6b-bf00-ddaeb3d8be59, infoPort=34815, infoSecurePort=0, ipcPort=44429, storageInfo=lv=-57;cid=testClusterID;nsid=946589829;c=1731328892201) 2024-11-11T12:41:34,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ca2cb2dc5c78421 with lease ID 0x3fbe7d7da6628391: from storage DS-8b0d7a07-5843-40e1-b6fd-faf101478f2c node DatanodeRegistration(127.0.0.1:44919, datanodeUuid=b74d14b0-0b0d-4c6b-bf00-ddaeb3d8be59, infoPort=34815, infoSecurePort=0, ipcPort=44429, storageInfo=lv=-57;cid=testClusterID;nsid=946589829;c=1731328892201), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-11T12:41:34,974 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8 
2024-11-11T12:41:35,093 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/zookeeper_0, clientPort=54294, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-11T12:41:35,110 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=54294 2024-11-11T12:41:35,125 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:35,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:35,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741825_1001 (size=7) 2024-11-11T12:41:35,910 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 with version=8 2024-11-11T12:41:35,910 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/hbase-staging 2024-11-11T12:41:36,061 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-11T12:41:36,453 INFO [Time-limited test {}] client.ConnectionUtils(129): master/32e78532c8b1:0 server-side Connection retries=45 2024-11-11T12:41:36,479 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:36,480 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:36,480 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T12:41:36,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:36,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T12:41:36,682 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating 
org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T12:41:36,765 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-11T12:41:36,777 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-11T12:41:36,782 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T12:41:36,819 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 11126 (auto-detected) 2024-11-11T12:41:36,821 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:03 (auto-detected) 2024-11-11T12:41:36,856 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:40877 2024-11-11T12:41:36,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:36,874 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:36,893 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:40877 connecting to ZooKeeper ensemble=127.0.0.1:54294 2024-11-11T12:41:36,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:408770x0, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T12:41:36,957 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40877-0x1019759ddb90000 connected 2024-11-11T12:41:37,065 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T12:41:37,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T12:41:37,093 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T12:41:37,104 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40877 2024-11-11T12:41:37,106 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40877 2024-11-11T12:41:37,108 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40877 2024-11-11T12:41:37,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40877 2024-11-11T12:41:37,112 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40877 
2024-11-11T12:41:37,123 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18, hbase.cluster.distributed=false 2024-11-11T12:41:37,234 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/32e78532c8b1:0 server-side Connection retries=45 2024-11-11T12:41:37,234 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:37,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:37,235 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-11T12:41:37,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-11T12:41:37,235 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-11T12:41:37,238 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-11T12:41:37,240 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-11T12:41:37,241 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.3:44673 2024-11-11T12:41:37,243 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-11T12:41:37,250 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-11T12:41:37,252 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:37,255 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:37,259 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:44673 connecting to ZooKeeper ensemble=127.0.0.1:54294 2024-11-11T12:41:37,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:446730x0, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-11T12:41:37,264 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:446730x0, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T12:41:37,264 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44673-0x1019759ddb90001 connected 2024-11-11T12:41:37,266 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, 
baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T12:41:37,267 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-11T12:41:37,268 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44673 2024-11-11T12:41:37,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44673 2024-11-11T12:41:37,269 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44673 2024-11-11T12:41:37,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44673 2024-11-11T12:41:37,270 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44673 2024-11-11T12:41:37,272 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/32e78532c8b1,40877,1731328896051 2024-11-11T12:41:37,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T12:41:37,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T12:41:37,281 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/32e78532c8b1,40877,1731328896051 2024-11-11T12:41:37,288 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;32e78532c8b1:40877 2024-11-11T12:41:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T12:41:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-11T12:41:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:37,304 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:37,305 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T12:41:37,307 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(111): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-11T12:41:37,307 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/32e78532c8b1,40877,1731328896051 from backup master directory 2024-11-11T12:41:37,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/32e78532c8b1,40877,1731328896051 2024-11-11T12:41:37,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T12:41:37,310 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-11T12:41:37,311 WARN [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T12:41:37,311 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=32e78532c8b1,40877,1731328896051 2024-11-11T12:41:37,314 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-11T12:41:37,315 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-11T12:41:37,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741826_1002 (size=42) 2024-11-11T12:41:37,793 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/hbase.id with ID: f11fadb8-1718-49f8-a365-7bd02133138b 2024-11-11T12:41:37,869 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-11T12:41:37,933 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:37,941 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:37,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741827_1003 (size=196) 2024-11-11T12:41:37,989 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME 
=> 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}
2024-11-11T12:41:37,993 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000
2024-11-11T12:41:38,018 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396
java.lang.NoSuchMethodException: org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
	at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at java.lang.Class.forName0(Native Method) ~[?:?]
	at java.lang.Class.forName(Class.java:375) ~[?:?]
	at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?]
	at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?]
	at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
	at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:38,024 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider
2024-11-11T12:41:38,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741828_1004 (size=1189)
2024-11-11T12:41:38,102 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store
2024-11-11T12:41:38,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741829_1005 (size=34)
2024-11-11T12:41:38,146 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure.
2024-11-11T12:41:38,147 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:38,149 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T12:41:38,149 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:41:38,149 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:41:38,149 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T12:41:38,149 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:41:38,150 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:41:38,150 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T12:41:38,153 WARN [master/32e78532c8b1:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/.initializing 2024-11-11T12:41:38,153 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/WALs/32e78532c8b1,40877,1731328896051 2024-11-11T12:41:38,166 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T12:41:38,181 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32e78532c8b1%2C40877%2C1731328896051, suffix=, logDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/WALs/32e78532c8b1,40877,1731328896051, archiveDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/oldWALs, maxLogs=10 2024-11-11T12:41:38,216 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/WALs/32e78532c8b1,40877,1731328896051/32e78532c8b1%2C40877%2C1731328896051.1731328898187, exclude list is [], retry=0 2024-11-11T12:41:38,250 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44919,DS-7ced9a19-f4c6-47e1-9a5b-c424a866a0af,DISK] 2024-11-11T12:41:38,255 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-11-11T12:41:38,309 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/WALs/32e78532c8b1,40877,1731328896051/32e78532c8b1%2C40877%2C1731328896051.1731328898187 2024-11-11T12:41:38,310 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34815:34815)] 2024-11-11T12:41:38,311 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:41:38,311 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:38,319 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,320 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,427 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-11T12:41:38,432 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:38,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:38,435 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,439 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, 
maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-11T12:41:38,439 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:38,440 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:38,441 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,445 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-11T12:41:38,445 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:38,446 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:38,447 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,450 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 
2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-11T12:41:38,450 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:38,454 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:38,458 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,460 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,470 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-11T12:41:38,475 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-11T12:41:38,481 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:41:38,482 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72355422, jitterRate=0.07817980647087097}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-11T12:41:38,488 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T12:41:38,489 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-11T12:41:38,520 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2620f730, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:38,566 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 
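Two derived numbers in the entries above can be reproduced from values that appear elsewhere in the log. FlushLargeStoresPolicy falls back to the memstore flush size divided by the number of column families, and 134217728 / 4 families (info, proc, rs, state) gives exactly the logged lower bound of 33554432 (32.0 M). The split policy's desiredMaxFileSize=72355422 is consistent with a 64 MB base scaled by (1 + jitterRate); both the formula and the 64 MB base are inferences from the numbers, not something the log states.

    public class MasterStoreSizingSketch {
      public static void main(String[] args) {
        // FlushLargeStoresPolicy fallback: flush size / number of column families.
        long flushSize = 134_217_728L;   // flushSize logged by MasterRegionFlusherAndCompactor
        int families = 4;                // info, proc, rs, state
        System.out.println("flushSizeLowerBound = " + flushSize / families);   // 33554432, as logged

        // ConstantSizeRegionSplitPolicy: desired max file size ~ base * (1 + jitterRate).
        long assumedBase = 64L * 1024 * 1024;      // 67108864; assumed, not reported in the log
        double jitterRate = 0.07817980647087097;   // jitterRate logged for this region
        System.out.println("desiredMaxFileSize ~ " + Math.round(assumedBase * (1 + jitterRate)));
      }
    }

The same relationship is consistent with the hbase:meta region opened later in this log (jitterRate=0.1207 and -0.1149 against the same 64 MB base).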
2024-11-11T12:41:38,579 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-11T12:41:38,580 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-11T12:41:38,583 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-11T12:41:38,585 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-11-11T12:41:38,592 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec 2024-11-11T12:41:38,593 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-11T12:41:38,627 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-11T12:41:38,643 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-11T12:41:38,645 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-11-11T12:41:38,648 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-11T12:41:38,652 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-11T12:41:38,657 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-11-11T12:41:38,660 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-11T12:41:38,666 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-11T12:41:38,668 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-11-11T12:41:38,669 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-11T12:41:38,672 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-11-11T12:41:38,685 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-11T12:41:38,690 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-11T12:41:38,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T12:41:38,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-11T12:41:38,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,698 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=32e78532c8b1,40877,1731328896051, sessionid=0x1019759ddb90000, setting cluster-up flag (Was=false) 2024-11-11T12:41:38,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,724 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-11T12:41:38,726 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32e78532c8b1,40877,1731328896051 2024-11-11T12:41:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,732 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:38,739 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-11T12:41:38,742 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=32e78532c8b1,40877,1731328896051 2024-11-11T12:41:38,795 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;32e78532c8b1:44673 2024-11-11T12:41:38,796 INFO 
[RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1008): ClusterId : f11fadb8-1718-49f8-a365-7bd02133138b 2024-11-11T12:41:38,799 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-11T12:41:38,806 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-11T12:41:38,806 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-11T12:41:38,810 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-11T12:41:38,811 DEBUG [RS:0;32e78532c8b1:44673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ffc437c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:38,820 DEBUG [RS:0;32e78532c8b1:44673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63e5daf2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32e78532c8b1/172.17.0.3:0 2024-11-11T12:41:38,823 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-11-11T12:41:38,823 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-11-11T12:41:38,823 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-11-11T12:41:38,826 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(3073): reportForDuty to master=32e78532c8b1,40877,1731328896051 with isa=32e78532c8b1/172.17.0.3:44673, startcode=1731328897232 2024-11-11T12:41:38,841 DEBUG [RS:0;32e78532c8b1:44673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T12:41:38,856 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-11-11T12:41:38,865 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-11-11T12:41:38,870 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
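Several entries above probe optional znodes under baseZNode=/hbase (balancer, normalizer, switch/split, switch/merge, snapshot-cleanup) and treat a missing node as a non-error. A small sketch of the same probe with the plain ZooKeeper client, using the quorum address from the log; the 30-second session timeout and the no-op watcher are assumptions of this sketch, not values taken from the test.

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeProbeSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54294", 30_000, event -> { });
        String[] optionalNodes = {
            "/hbase/balancer", "/hbase/normalizer",
            "/hbase/switch/split", "/hbase/switch/merge", "/hbase/snapshot-cleanup"};
        for (String path : optionalNodes) {
          Stat stat = zk.exists(path, false);
          // A null Stat is the "node does not exist (not necessarily an error)" case in the log.
          System.out.println(path + " -> " + (stat == null ? "absent" : "present"));
        }
        zk.close();
      }
    }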
2024-11-11T12:41:38,877 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 32e78532c8b1,40877,1731328896051 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-11T12:41:38,884 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/32e78532c8b1:0, corePoolSize=5, maxPoolSize=5 2024-11-11T12:41:38,884 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/32e78532c8b1:0, corePoolSize=5, maxPoolSize=5 2024-11-11T12:41:38,884 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/32e78532c8b1:0, corePoolSize=5, maxPoolSize=5 2024-11-11T12:41:38,884 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44405, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T12:41:38,885 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/32e78532c8b1:0, corePoolSize=5, maxPoolSize=5 2024-11-11T12:41:38,885 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/32e78532c8b1:0, corePoolSize=10, maxPoolSize=10 2024-11-11T12:41:38,885 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:38,885 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/32e78532c8b1:0, corePoolSize=2, maxPoolSize=2 2024-11-11T12:41:38,886 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:38,891 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:38,894 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731328928893 2024-11-11T12:41:38,895 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-11T12:41:38,895 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T12:41:38,896 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-11-11T12:41:38,896 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-11T12:41:38,900 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-11T12:41:38,901 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-11T12:41:38,901 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-11T12:41:38,904 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-11T12:41:38,902 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:38,904 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T12:41:38,915 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:38,925 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-11T12:41:38,926 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-11T12:41:38,927 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-11T12:41:38,929 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-11-11T12:41:38,929 WARN [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-11T12:41:38,932 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-11T12:41:38,932 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-11T12:41:38,936 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.large.0-1731328898934,5,FailOnTimeoutGroup] 2024-11-11T12:41:38,936 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.small.0-1731328898936,5,FailOnTimeoutGroup] 2024-11-11T12:41:38,937 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:38,937 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-11T12:41:38,938 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:38,938 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
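The hbase:meta descriptor written above declares families such as info with VERSIONS => '3', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', BLOCKSIZE => 8192, plus the MultiRowMutationEndpoint coprocessor. A sketch of how a similar descriptor can be assembled with the public client API; the table name demo:meta_like is hypothetical, and the plain setCoprocessor(String) call uses a default priority rather than the 536870911 shown in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Column family mirroring the 'info' attributes logged above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "meta_like"))   // hypothetical table name
            .setColumnFamily(info)
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
        System.out.println(td);
      }
    }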
2024-11-11T12:41:38,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741831_1007 (size=1039) 2024-11-11T12:41:39,031 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(3073): reportForDuty to master=32e78532c8b1,40877,1731328896051 with isa=32e78532c8b1/172.17.0.3:44673, startcode=1731328897232 2024-11-11T12:41:39,033 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,037 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40877 {}] master.ServerManager(486): Registering regionserver=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,049 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:41:39,050 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:42421 2024-11-11T12:41:39,050 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-11-11T12:41:39,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T12:41:39,058 DEBUG [RS:0;32e78532c8b1:44673 {}] zookeeper.ZKUtil(111): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,058 WARN [RS:0;32e78532c8b1:44673 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-11T12:41:39,058 INFO [RS:0;32e78532c8b1:44673 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T12:41:39,059 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,062 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [32e78532c8b1,44673,1731328897232] 2024-11-11T12:41:39,084 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-11-11T12:41:39,098 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-11T12:41:39,115 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-11T12:41:39,121 INFO [RS:0;32e78532c8b1:44673 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-11T12:41:39,122 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
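The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false) matches the stock sizing rule: the low-water mark is 95% of the global limit, and 880 M corresponds to 40% of roughly a 2.2 GB heap. The heap figure is inferred, not logged; the two fractions are the usual defaults of hbase.regionserver.global.memstore.size and hbase.regionserver.global.memstore.size.lower.limit.

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        double assumedHeapMb = 2200.0;   // inferred from 880 M / 0.4; not reported in the log
        double globalFraction = 0.4;     // hbase.regionserver.global.memstore.size (default)
        double lowerLimit = 0.95;        // hbase.regionserver.global.memstore.size.lower.limit (default)
        double globalLimitMb = assumedHeapMb * globalFraction;   // ~880 M, as logged
        double lowMarkMb = globalLimitMb * lowerLimit;           // ~836 M, as logged
        System.out.printf("globalMemStoreLimit=%.0f M, lowMark=%.0f M%n", globalLimitMb, lowMarkMb);
      }
    }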
2024-11-11T12:41:39,123 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-11-11T12:41:39,131 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:39,132 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,132 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,132 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,132 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,133 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,133 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/32e78532c8b1:0, corePoolSize=2, maxPoolSize=2 2024-11-11T12:41:39,134 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,134 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,135 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,135 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,135 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/32e78532c8b1:0, corePoolSize=1, maxPoolSize=1 2024-11-11T12:41:39,135 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/32e78532c8b1:0, corePoolSize=3, maxPoolSize=3 2024-11-11T12:41:39,136 DEBUG [RS:0;32e78532c8b1:44673 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0, corePoolSize=3, maxPoolSize=3 2024-11-11T12:41:39,148 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:39,148 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:39,148 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
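The chore registrations above (CompactionChecker every 1000 ms, MemstoreFlusherChore every 1000 ms, nonceCleaner every 360000 ms) all follow the same pattern: a named task re-run at a fixed period on a shared ChoreService. A plain-JDK analogue of that pattern, not the HBase ChoreService API itself:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChorePatternSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
        // Re-run a named task every 1000 ms, like the CompactionChecker entry above.
        pool.scheduleAtFixedRate(
            () -> System.out.println("CompactionChecker-like tick"),
            0, 1000, TimeUnit.MILLISECONDS);
        Thread.sleep(3500);   // let a few ticks run, then stop
        pool.shutdownNow();
      }
    }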
2024-11-11T12:41:39,148 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:39,148 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,44673,1731328897232-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T12:41:39,184 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-11T12:41:39,187 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,44673,1731328897232-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:39,216 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.Replication(204): 32e78532c8b1,44673,1731328897232 started 2024-11-11T12:41:39,216 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1767): Serving as 32e78532c8b1,44673,1731328897232, RpcServer on 32e78532c8b1/172.17.0.3:44673, sessionid=0x1019759ddb90001 2024-11-11T12:41:39,217 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-11T12:41:39,217 DEBUG [RS:0;32e78532c8b1:44673 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,217 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32e78532c8b1,44673,1731328897232' 2024-11-11T12:41:39,217 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-11T12:41:39,219 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-11T12:41:39,220 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-11T12:41:39,220 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-11T12:41:39,220 DEBUG [RS:0;32e78532c8b1:44673 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,220 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '32e78532c8b1,44673,1731328897232' 2024-11-11T12:41:39,220 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-11T12:41:39,222 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-11T12:41:39,224 DEBUG [RS:0;32e78532c8b1:44673 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-11T12:41:39,225 INFO [RS:0;32e78532c8b1:44673 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-11T12:41:39,225 INFO [RS:0;32e78532c8b1:44673 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
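Both quota managers above report "Quota support disabled" because quotas are off by default. A sketch of what turning them on and throttling a table could look like; hbase.quota.enabled has to be set on the cluster side (master and region servers) to take effect, the table name demo_table is hypothetical, and the specific throttle is only an illustration.

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.quotas.QuotaSettingsFactory;
    import org.apache.hadoop.hbase.quotas.ThrottleType;

    public class QuotaEnableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setBoolean("hbase.quota.enabled", true);   // must also be set cluster-side to take effect
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Limit a hypothetical table to 1000 requests per second.
          admin.setQuota(QuotaSettingsFactory.throttleTable(
              TableName.valueOf("demo_table"), ThrottleType.REQUEST_NUMBER, 1000, TimeUnit.SECONDS));
        }
      }
    }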
2024-11-11T12:41:39,333 INFO [RS:0;32e78532c8b1:44673 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-11T12:41:39,338 INFO [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32e78532c8b1%2C44673%2C1731328897232, suffix=, logDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232, archiveDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/oldWALs, maxLogs=32 2024-11-11T12:41:39,344 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-11-11T12:41:39,345 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:41:39,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741832_1008 (size=32) 2024-11-11T12:41:39,366 DEBUG [RS:0;32e78532c8b1:44673 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232/32e78532c8b1%2C44673%2C1731328897232.1731328899341, exclude list is [], retry=0 2024-11-11T12:41:39,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:39,378 DEBUG [RS-EventLoopGroup-3-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44919,DS-7ced9a19-f4c6-47e1-9a5b-c424a866a0af,DISK] 2024-11-11T12:41:39,379 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T12:41:39,382 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T12:41:39,383 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:39,384 INFO [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232/32e78532c8b1%2C44673%2C1731328897232.1731328899341 2024-11-11T12:41:39,384 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:39,385 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T12:41:39,385 DEBUG [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34815:34815)] 2024-11-11T12:41:39,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T12:41:39,399 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:39,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:39,401 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T12:41:39,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T12:41:39,405 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:39,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:39,416 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740 2024-11-11T12:41:39,417 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740 2024-11-11T12:41:39,423 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
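The FlushLargeStoresPolicy entry above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so it falls back to the flush size divided by the family count. A sketch of setting that bound explicitly as a descriptor value on a hypothetical table (demo_table and the 16 MB figure are illustrative choices, not values from this test):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // Explicit per-family flush lower bound, so the fallback in the log is not needed.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
        System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }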
2024-11-11T12:41:39,426 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T12:41:39,433 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:41:39,434 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75206894, jitterRate=0.12067005038261414}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:41:39,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T12:41:39,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T12:41:39,439 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-11T12:41:39,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T12:41:39,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T12:41:39,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T12:41:39,448 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-11T12:41:39,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-11T12:41:39,452 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-11-11T12:41:39,452 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-11-11T12:41:39,461 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-11T12:41:39,470 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-11T12:41:39,476 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-11T12:41:39,628 DEBUG [32e78532c8b1:40877 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-11T12:41:39,635 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,642 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32e78532c8b1,44673,1731328897232, state=OPENING 2024-11-11T12:41:39,651 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-11T12:41:39,661 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:39,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:39,669 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T12:41:39,671 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T12:41:39,677 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:41:39,862 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:39,864 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-11T12:41:39,868 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-11T12:41:39,880 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-11-11T12:41:39,880 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-11T12:41:39,881 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-11T12:41:39,893 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=32e78532c8b1%2C44673%2C1731328897232.meta, suffix=.meta, logDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232, archiveDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/oldWALs, maxLogs=32 2024-11-11T12:41:39,917 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(600): When create output stream for /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232/32e78532c8b1%2C44673%2C1731328897232.meta.1731328899895.meta, exclude list is [], retry=0 2024-11-11T12:41:39,924 DEBUG [RS-EventLoopGroup-3-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:44919,DS-7ced9a19-f4c6-47e1-9a5b-c424a866a0af,DISK] 2024-11-11T12:41:39,930 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232/32e78532c8b1%2C44673%2C1731328897232.meta.1731328899895.meta 2024-11-11T12:41:39,931 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer 
with pipeline: [(127.0.0.1/127.0.0.1:34815:34815)] 2024-11-11T12:41:39,931 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:41:39,933 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-11T12:41:40,010 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-11T12:41:40,016 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-11T12:41:40,022 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-11T12:41:40,022 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:40,022 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-11-11T12:41:40,022 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-11-11T12:41:40,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-11T12:41:40,030 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-11T12:41:40,030 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:40,031 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:40,032 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-11T12:41:40,033 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-11T12:41:40,034 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:40,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:40,035 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-11T12:41:40,036 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-11T12:41:40,037 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:40,038 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-11T12:41:40,039 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740 2024-11-11T12:41:40,042 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740 2024-11-11T12:41:40,046 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:41:40,049 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-11-11T12:41:40,051 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59396744, jitterRate=-0.11491954326629639}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:41:40,052 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-11-11T12:41:40,060 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731328899855 2024-11-11T12:41:40,071 DEBUG [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-11T12:41:40,071 INFO [RS_OPEN_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-11-11T12:41:40,072 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:40,074 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 32e78532c8b1,44673,1731328897232, state=OPEN 2024-11-11T12:41:40,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T12:41:40,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-11T12:41:40,084 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T12:41:40,084 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-11T12:41:40,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=3, resume processing ppid=2 2024-11-11T12:41:40,089 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=32e78532c8b1,44673,1731328897232 in 407 msec 2024-11-11T12:41:40,095 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-11-11T12:41:40,096 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; 
TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 630 msec 2024-11-11T12:41:40,101 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.3110 sec 2024-11-11T12:41:40,101 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731328900101, completionTime=-1 2024-11-11T12:41:40,101 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-11T12:41:40,101 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-11-11T12:41:40,140 DEBUG [hconnection-0x23ab8f3b-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:40,143 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40904, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:40,156 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=1 2024-11-11T12:41:40,156 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731328960156 2024-11-11T12:41:40,156 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731329020156 2024-11-11T12:41:40,156 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 54 msec 2024-11-11T12:41:40,183 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:40,184 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:40,184 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:40,185 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-32e78532c8b1:40877, period=300000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:40,186 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-11T12:41:40,194 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 
2024-11-11T12:41:40,196 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-11T12:41:40,197 DEBUG [master/32e78532c8b1:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-11-11T12:41:40,203 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-11-11T12:41:40,206 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:41:40,207 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:40,210 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:41:40,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741835_1011 (size=358) 2024-11-11T12:41:40,635 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 69716d04bd60881dfce8676dd10b689d, NAME => 'hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:41:40,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741836_1012 (size=42) 2024-11-11T12:41:41,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:41,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing 69716d04bd60881dfce8676dd10b689d, disabling compactions & flushes 2024-11-11T12:41:41,073 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:41:41,073 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 
2024-11-11T12:41:41,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. after waiting 0 ms 2024-11-11T12:41:41,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:41:41,074 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:41:41,074 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for 69716d04bd60881dfce8676dd10b689d: 2024-11-11T12:41:41,077 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:41:41,140 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1731328901078"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731328901078"}]},"ts":"1731328901078"} 2024-11-11T12:41:41,190 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T12:41:41,198 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:41:41,202 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328901199"}]},"ts":"1731328901199"} 2024-11-11T12:41:41,208 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-11-11T12:41:41,215 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=69716d04bd60881dfce8676dd10b689d, ASSIGN}] 2024-11-11T12:41:41,217 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=69716d04bd60881dfce8676dd10b689d, ASSIGN 2024-11-11T12:41:41,222 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=69716d04bd60881dfce8676dd10b689d, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:41:41,373 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=69716d04bd60881dfce8676dd10b689d, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:41,379 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure 69716d04bd60881dfce8676dd10b689d, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:41:41,533 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 
32e78532c8b1,44673,1731328897232 2024-11-11T12:41:41,540 INFO [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:41:41,541 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => 69716d04bd60881dfce8676dd10b689d, NAME => 'hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:41:41,542 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,542 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:41,542 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,542 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,545 INFO [StoreOpener-69716d04bd60881dfce8676dd10b689d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,548 INFO [StoreOpener-69716d04bd60881dfce8676dd10b689d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 69716d04bd60881dfce8676dd10b689d columnFamilyName info 2024-11-11T12:41:41,549 DEBUG [StoreOpener-69716d04bd60881dfce8676dd10b689d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:41,549 INFO [StoreOpener-69716d04bd60881dfce8676dd10b689d-1 {}] regionserver.HStore(327): Store=69716d04bd60881dfce8676dd10b689d/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:41,551 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,552 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,556 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1085): writing seq id for 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:41:41,559 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:41:41,560 INFO [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened 69716d04bd60881dfce8676dd10b689d; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70103937, jitterRate=0.04463006556034088}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-11T12:41:41,561 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for 69716d04bd60881dfce8676dd10b689d: 2024-11-11T12:41:41,563 INFO [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d., pid=6, masterSystemTime=1731328901533 2024-11-11T12:41:41,566 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:41:41,567 INFO [RS_OPEN_PRIORITY_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 
2024-11-11T12:41:41,568 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=69716d04bd60881dfce8676dd10b689d, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:41,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-11-11T12:41:41,578 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure 69716d04bd60881dfce8676dd10b689d, server=32e78532c8b1,44673,1731328897232 in 193 msec 2024-11-11T12:41:41,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-11-11T12:41:41,580 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=69716d04bd60881dfce8676dd10b689d, ASSIGN in 361 msec 2024-11-11T12:41:41,582 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:41:41,582 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328901582"}]},"ts":"1731328901582"} 2024-11-11T12:41:41,585 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-11-11T12:41:41,590 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:41:41,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 1.3930 sec 2024-11-11T12:41:41,634 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-11-11T12:41:41,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:41,636 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-11-11T12:41:41,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:41:41,677 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-11-11T12:41:41,697 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T12:41:41,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; 
CreateNamespaceProcedure, namespace=default in 32 msec 2024-11-11T12:41:41,723 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-11-11T12:41:41,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-11-11T12:41:41,746 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 22 msec 2024-11-11T12:41:41,762 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-11-11T12:41:41,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-11-11T12:41:41,767 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 4.456sec 2024-11-11T12:41:41,769 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-11T12:41:41,770 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-11T12:41:41,772 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-11T12:41:41,772 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-11T12:41:41,772 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-11T12:41:41,773 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-11T12:41:41,774 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-11T12:41:41,782 DEBUG [master/32e78532c8b1:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-11-11T12:41:41,783 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-11T12:41:41,783 INFO [master/32e78532c8b1:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=32e78532c8b1,40877,1731328896051-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-11T12:41:41,854 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x76523d14 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@46873e4f 2024-11-11T12:41:41,855 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-11-11T12:41:41,862 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76ba07, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:41,865 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-11T12:41:41,865 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-11T12:41:41,875 DEBUG [hconnection-0x7edf53b1-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:41,883 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40918, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:41,891 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=32e78532c8b1,40877,1731328896051 2024-11-11T12:41:41,909 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=219, OpenFileDescriptor=444, MaxFileDescriptor=1048576, SystemLoadAverage=695, ProcessCount=11, AvailableMemoryMB=3156 2024-11-11T12:41:41,919 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:41:41,922 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:41:41,929 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-11T12:41:41,934 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:41:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:41:41,939 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:41:41,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 9 2024-11-11T12:41:41,940 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:41,943 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:41:41,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:41,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741837_1013 (size=960) 2024-11-11T12:41:42,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:42,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:42,363 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:41:42,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741838_1014 (size=53) 2024-11-11T12:41:42,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:42,776 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:42,776 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 0a6a9f82df0ac9ece8343137343e2f72, disabling compactions & flushes 2024-11-11T12:41:42,777 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:42,777 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:42,777 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. after waiting 0 ms 2024-11-11T12:41:42,777 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:42,777 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:42,777 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:42,779 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:41:42,779 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731328902779"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731328902779"}]},"ts":"1731328902779"} 2024-11-11T12:41:42,783 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T12:41:42,785 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:41:42,785 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328902785"}]},"ts":"1731328902785"} 2024-11-11T12:41:42,787 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:41:42,792 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, ASSIGN}] 2024-11-11T12:41:42,794 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, ASSIGN 2024-11-11T12:41:42,795 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:41:42,946 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=0a6a9f82df0ac9ece8343137343e2f72, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:42,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:41:43,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:43,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:43,116 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:43,117 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:41:43,117 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,117 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:41:43,118 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,118 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,120 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,125 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:41:43,126 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a6a9f82df0ac9ece8343137343e2f72 columnFamilyName A 2024-11-11T12:41:43,126 DEBUG [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:43,127 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(327): Store=0a6a9f82df0ac9ece8343137343e2f72/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:43,127 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,131 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:41:43,131 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a6a9f82df0ac9ece8343137343e2f72 columnFamilyName B 2024-11-11T12:41:43,131 DEBUG [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:43,133 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(327): Store=0a6a9f82df0ac9ece8343137343e2f72/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:43,133 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,135 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:41:43,136 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0a6a9f82df0ac9ece8343137343e2f72 columnFamilyName C 2024-11-11T12:41:43,136 DEBUG [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:41:43,137 INFO [StoreOpener-0a6a9f82df0ac9ece8343137343e2f72-1 {}] regionserver.HStore(327): Store=0a6a9f82df0ac9ece8343137343e2f72/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:41:43,139 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:43,141 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,142 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,146 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:41:43,149 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:43,155 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:41:43,156 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1102): Opened 0a6a9f82df0ac9ece8343137343e2f72; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63744191, jitterRate=-0.050137534737586975}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:41:43,158 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:43,159 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., pid=11, masterSystemTime=1731328903105 2024-11-11T12:41:43,164 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:43,164 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:43,165 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=0a6a9f82df0ac9ece8343137343e2f72, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:43,172 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-11-11T12:41:43,173 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 in 219 msec 2024-11-11T12:41:43,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-11-11T12:41:43,177 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, ASSIGN in 381 msec 2024-11-11T12:41:43,178 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:41:43,178 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328903178"}]},"ts":"1731328903178"} 2024-11-11T12:41:43,181 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:41:43,187 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:41:43,190 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2530 sec 2024-11-11T12:41:44,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-11-11T12:41:44,072 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 9 completed 2024-11-11T12:41:44,077 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6fcb5f29 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7fdf5682 2024-11-11T12:41:44,081 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6e36fe, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,084 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,087 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40924, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,090 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:41:44,092 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:51742, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:41:44,099 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f2091cc to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79d38d10 2024-11-11T12:41:44,103 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6f343a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,105 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09bd0964 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c63ae4e 2024-11-11T12:41:44,110 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1324ee83, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,112 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x18cb251d to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@736f1673 2024-11-11T12:41:44,116 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@478bae6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,117 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x45b55c24 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ee2166f 2024-11-11T12:41:44,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48068a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,123 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0e52b42a to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3f34ff67 2024-11-11T12:41:44,127 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38766d64, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,129 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09ed28bb to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4b5cad1a 2024-11-11T12:41:44,133 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@295cb1ac, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,134 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x12a1285d to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3c3b736e 2024-11-11T12:41:44,137 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70267494, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,139 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x353bc462 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@767a8485 2024-11-11T12:41:44,145 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d2a8e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,146 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x47fe2fa7 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6502d571 2024-11-11T12:41:44,151 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c915d17, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:41:44,159 DEBUG [hconnection-0x3785b265-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,162 DEBUG [hconnection-0x175a69d5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,162 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40940, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,165 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,165 DEBUG [hconnection-0xafaa2be-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,167 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40952, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,169 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:44,174 DEBUG [hconnection-0x28a159a5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] 
procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees 2024-11-11T12:41:44,179 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:44,181 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=12, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:44,183 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:44,191 DEBUG [hconnection-0x685e87a2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,193 DEBUG [hconnection-0x1ac08854-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:44,195 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,198 DEBUG [hconnection-0x1d254bb7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,204 DEBUG [hconnection-0x6dc8eca7-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,206 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40966, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,217 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,218 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40992, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,222 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41006, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,227 DEBUG [hconnection-0x521649e0-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:41:44,229 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:41022, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:41:44,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:44,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 
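[Editor's note: the "Client=jenkins//172.17.0.3 flush TestAcidGuarantees" request and the FlushTableProcedure stored as pid=12 above correspond to a table flush issued through the HBase Admin API, which the client then polls for completion ("Checking to see if procedure is done pid=12"). A minimal client-side sketch of issuing the same request is included below for reference; the class name and standalone main method are illustrative only and are not part of the test code.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushExample {
        public static void main(String[] args) throws Exception {
            // Assumes hbase-site.xml (ZooKeeper quorum etc.) is on the classpath.
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush all regions of the table; in this log the
                // master runs it as a FlushTableProcedure (pid=12) and the caller
                // blocks while the procedure is checked for completion.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }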
2024-11-11T12:41:44,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:44,254 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:44,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:44,255 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:44,353 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:44,366 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b43e7f9cd7e6420a8644f311bb811aeb is 50, key is test_row_0/A:col10/1731328904228/Put/seqid=0 2024-11-11T12:41:44,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:44,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,373 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,375 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741839_1015 (size=12001) 2024-11-11T12:41:44,416 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b43e7f9cd7e6420a8644f311bb811aeb 2024-11-11T12:41:44,497 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328964482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,498 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328964489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,500 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328964498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,501 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328964498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:44,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328964504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,537 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:44,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/f380097b5c614a92af7e79cbf6323415 is 50, key is test_row_0/B:col10/1731328904228/Put/seqid=0 2024-11-11T12:41:44,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:44,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,558 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,559 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741840_1016 (size=12001) 2024-11-11T12:41:44,593 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/f380097b5c614a92af7e79cbf6323415 2024-11-11T12:41:44,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328964608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328964609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328964609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,614 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328964611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,616 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328964613, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,671 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/d9aea97c714c4f2b80e46314203afec3 is 50, key is test_row_0/C:col10/1731328904228/Put/seqid=0 2024-11-11T12:41:44,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741841_1017 (size=12001) 2024-11-11T12:41:44,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=14 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/d9aea97c714c4f2b80e46314203afec3 2024-11-11T12:41:44,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b43e7f9cd7e6420a8644f311bb811aeb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb 2024-11-11T12:41:44,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:44,722 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:44,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
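[Editor's note: the repeated RegionTooBusyException entries above are the region server rejecting writes while the region's memstore is over its 512 KB blocking limit during the flush; the client normally retries such calls, which is why the same connections reappear with new callIds and later deadlines. Below is a minimal sketch of a writer in the style this test exercises, one Put spanning families A, B and C for a single row, with the standard client retry settings set explicitly. The class name, value bytes, and chosen retry numbers are assumptions for illustration, not taken from the test.]

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriterExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            // Retry/backoff knobs that bound how long the client keeps retrying
            // while the server answers with RegionTooBusyException (assumed values).
            conf.setInt("hbase.client.retries.number", 30);
            conf.setLong("hbase.client.pause", 200);
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                byte[] value = Bytes.toBytes("value");  // placeholder payload
                Put put = new Put(Bytes.toBytes("test_row_0"));
                // One Put carries the same column into all three families so a
                // concurrent reader sees either the old or the new row as a whole.
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), value);
                put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), value);
                table.put(put);
            }
        }
    }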
2024-11-11T12:41:44,723 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,723 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,738 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb, entries=150, sequenceid=14, filesize=11.7 K 2024-11-11T12:41:44,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/f380097b5c614a92af7e79cbf6323415 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415 2024-11-11T12:41:44,774 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415, entries=150, sequenceid=14, filesize=11.7 K 2024-11-11T12:41:44,777 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/d9aea97c714c4f2b80e46314203afec3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3 2024-11-11T12:41:44,792 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3, entries=150, sequenceid=14, filesize=11.7 K 
2024-11-11T12:41:44,798 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 0a6a9f82df0ac9ece8343137343e2f72 in 561ms, sequenceid=14, compaction requested=false 2024-11-11T12:41:44,800 DEBUG [MemStoreFlusher.0 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestAcidGuarantees' 2024-11-11T12:41:44,801 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:44,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:44,833 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:41:44,833 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:44,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:44,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:44,834 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:44,867 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/fa7db528b8a94127bef905a27e25f742 is 50, key is test_row_0/A:col10/1731328904481/Put/seqid=0 2024-11-11T12:41:44,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328964875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,889 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328964877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,890 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328964879, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,891 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328964881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,892 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328964881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,893 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:44,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:44,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:44,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:44,894 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,894 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:44,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:44,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741842_1018 (size=14341) 2024-11-11T12:41:44,998 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:44,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328964992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:44,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328964995, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,001 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328964996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,008 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,008 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328964998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,010 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328965006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,056 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,057 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:45,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:45,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,058 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,058 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,211 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328965208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,213 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328965208, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,214 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,214 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328965209, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,215 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,216 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328965213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328965215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:45,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,221 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,221 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,311 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/fa7db528b8a94127bef905a27e25f742 2024-11-11T12:41:45,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:45,350 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/421e5b3244be4c7ca9b5ecf7954831a1 is 50, key is test_row_0/B:col10/1731328904481/Put/seqid=0 2024-11-11T12:41:45,380 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:45,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
as already flushing 2024-11-11T12:41:45,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,382 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741843_1019 (size=12001) 2024-11-11T12:41:45,399 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/421e5b3244be4c7ca9b5ecf7954831a1 2024-11-11T12:41:45,469 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/540dcfe32bf144478f794580454e5922 is 50, key is test_row_0/C:col10/1731328904481/Put/seqid=0 2024-11-11T12:41:45,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741844_1020 (size=12001) 2024-11-11T12:41:45,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/540dcfe32bf144478f794580454e5922 2024-11-11T12:41:45,519 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/fa7db528b8a94127bef905a27e25f742 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742 2024-11-11T12:41:45,532 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328965518, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742, entries=200, sequenceid=39, filesize=14.0 K 2024-11-11T12:41:45,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328965523, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,534 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328965526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/421e5b3244be4c7ca9b5ecf7954831a1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1 2024-11-11T12:41:45,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328965527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:45,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328965527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,538 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:45,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:45,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:45,540 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] handler.RSProcedureHandler(58): pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:45,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=13 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=13 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:45,549 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1, entries=150, sequenceid=39, filesize=11.7 K 2024-11-11T12:41:45,549 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:41:45,553 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/540dcfe32bf144478f794580454e5922 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922 2024-11-11T12:41:45,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922, entries=150, sequenceid=39, filesize=11.7 K 2024-11-11T12:41:45,588 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=67.09 KB/68700 for 0a6a9f82df0ac9ece8343137343e2f72 in 755ms, sequenceid=39, compaction requested=false 2024-11-11T12:41:45,589 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:45,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-11T12:41:45,650 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-11-11T12:41:45,695 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:45,696 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=13 2024-11-11T12:41:45,696 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
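The pid=13 failures above ("NOT flushing ... as already flushing", then "Unable to complete flush") come from the master-driven flush colliding with a flush the MemStoreFlusher had already started: FlushRegionCallable refuses to run while another flush is in progress, reports the IOException back, and the master re-dispatches the procedure (the second "Executing remote procedure ... pid=13" entry above); on that retry the region is idle again, so the full 3/3 column-family flush proceeds in the entries that follow. The procedure itself is triggered by an Admin flush request from the test client; a small sketch of what that call looks like, assuming the standard HBase Admin API (the table name is taken from the log, everything else is illustrative):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Submits a FlushTableProcedure on the master (the pid=12/pid=14 entries in
                // this log) and waits for it, as in "Operation: FLUSH, Table Name:
                // default:TestAcidGuarantees, procId: 12 completed".
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }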
2024-11-11T12:41:45,696 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:45,697 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:45,719 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4c608721ed654cef97a5bc9e59acac72 is 50, key is test_row_0/A:col10/1731328904872/Put/seqid=0 2024-11-11T12:41:45,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741845_1021 (size=12001) 2024-11-11T12:41:45,744 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4c608721ed654cef97a5bc9e59acac72 2024-11-11T12:41:45,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fdbfbeb7b62349ba99815838c8da1d52 is 50, key is test_row_0/B:col10/1731328904872/Put/seqid=0 2024-11-11T12:41:45,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741846_1022 (size=12001) 2024-11-11T12:41:45,842 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fdbfbeb7b62349ba99815838c8da1d52 2024-11-11T12:41:45,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/293e90c3ddd847d7a2405afda7404815 is 50, key is test_row_0/C:col10/1731328904872/Put/seqid=0 2024-11-11T12:41:45,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741847_1023 (size=12001) 2024-11-11T12:41:45,944 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/293e90c3ddd847d7a2405afda7404815 2024-11-11T12:41:45,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4c608721ed654cef97a5bc9e59acac72 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72 2024-11-11T12:41:45,985 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72, entries=150, sequenceid=52, filesize=11.7 K 2024-11-11T12:41:45,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fdbfbeb7b62349ba99815838c8da1d52 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52 2024-11-11T12:41:46,003 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52, entries=150, sequenceid=52, filesize=11.7 K 2024-11-11T12:41:46,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/293e90c3ddd847d7a2405afda7404815 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815 2024-11-11T12:41:46,016 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815, entries=150, sequenceid=52, filesize=11.7 K 2024-11-11T12:41:46,018 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=0 B/0 for 0a6a9f82df0ac9ece8343137343e2f72 in 322ms, sequenceid=52, compaction requested=true 2024-11-11T12:41:46,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:46,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-11T12:41:46,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=13 2024-11-11T12:41:46,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-11-11T12:41:46,026 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8390 sec 2024-11-11T12:41:46,029 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=12, table=TestAcidGuarantees in 1.8560 sec 2024-11-11T12:41:46,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:46,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:41:46,087 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:46,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:46,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:46,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:46,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:46,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:46,103 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/5a3e25f98fa847e6a460e7e4de0ff0ec is 50, key is test_row_0/A:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:46,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741848_1024 (size=16681) 2024-11-11T12:41:46,205 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328966188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,217 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328966202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328966202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,219 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328966205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328966207, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,314 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328966308, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-11-11T12:41:46,318 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 12 completed 2024-11-11T12:41:46,322 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:46,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees 2024-11-11T12:41:46,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328966320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:46,328 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:46,330 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=14, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:46,330 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=14, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:46,330 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328966325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,331 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328966326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328966327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:46,484 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,485 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:46,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:46,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,486 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,486 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,531 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328966521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/5a3e25f98fa847e6a460e7e4de0ff0ec 2024-11-11T12:41:46,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328966530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,538 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328966533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,549 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328966542, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328966548, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,563 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/6b9b00cf7632497d8ef21e004f1caf80 is 50, key is test_row_0/B:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:46,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741849_1025 (size=12001) 2024-11-11T12:41:46,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:46,639 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,640 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:46,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,640 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:46,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,760 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-11T12:41:46,761 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-11T12:41:46,763 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-11-11T12:41:46,763 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-11-11T12:41:46,765 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T12:41:46,765 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-11T12:41:46,766 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-11T12:41:46,766 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-11T12:41:46,768 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees 
2024-11-11T12:41:46,768 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestAcidGuarantees Metrics about Tables on a single HBase RegionServer 2024-11-11T12:41:46,795 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:46,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:46,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,798 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,798 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,845 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328966837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,846 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328966841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,853 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328966849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,855 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328966853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,862 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:46,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328966860, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:46,951 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:46,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:46,959 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:46,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:47,010 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/6b9b00cf7632497d8ef21e004f1caf80 2024-11-11T12:41:47,038 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0f0f0394872f40c682e9b01a1a351917 is 50, key is test_row_0/C:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:47,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741850_1026 (size=12001) 2024-11-11T12:41:47,115 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,115 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:47,116 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=63 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0f0f0394872f40c682e9b01a1a351917 2024-11-11T12:41:47,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:47,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:47,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:47,116 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] handler.RSProcedureHandler(58): pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:47,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=15 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:47,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=15 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:47,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/5a3e25f98fa847e6a460e7e4de0ff0ec as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec 2024-11-11T12:41:47,158 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec, entries=250, sequenceid=63, filesize=16.3 K 2024-11-11T12:41:47,166 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/6b9b00cf7632497d8ef21e004f1caf80 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80 2024-11-11T12:41:47,179 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80, entries=150, sequenceid=63, filesize=11.7 K 2024-11-11T12:41:47,183 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0f0f0394872f40c682e9b01a1a351917 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917 2024-11-11T12:41:47,213 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917, entries=150, sequenceid=63, filesize=11.7 K 2024-11-11T12:41:47,219 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=154.31 KB/158010 for 0a6a9f82df0ac9ece8343137343e2f72 in 1132ms, sequenceid=63, compaction requested=true 2024-11-11T12:41:47,219 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:47,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:47,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:47,236 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:47,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): 
Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:47,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:47,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:47,237 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:47,239 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:47,243 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:47,245 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:47,245 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:47,245 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=46.9 K 2024-11-11T12:41:47,248 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f380097b5c614a92af7e79cbf6323415, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731328904228 2024-11-11T12:41:47,249 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 421e5b3244be4c7ca9b5ecf7954831a1, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731328904481 2024-11-11T12:41:47,249 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fdbfbeb7b62349ba99815838c8da1d52, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731328904841 2024-11-11T12:41:47,251 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 6b9b00cf7632497d8ef21e004f1caf80, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906065 2024-11-11T12:41:47,251 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 55024 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:47,251 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:47,251 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:47,252 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=53.7 K 2024-11-11T12:41:47,253 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b43e7f9cd7e6420a8644f311bb811aeb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731328904228 2024-11-11T12:41:47,253 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa7db528b8a94127bef905a27e25f742, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731328904481 2024-11-11T12:41:47,254 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c608721ed654cef97a5bc9e59acac72, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731328904841 2024-11-11T12:41:47,255 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a3e25f98fa847e6a460e7e4de0ff0ec, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906043 2024-11-11T12:41:47,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=15 2024-11-11T12:41:47,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:47,273 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:47,274 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/595810c80a84428888e00893f35cc2db is 50, key is test_row_0/A:col10/1731328906173/Put/seqid=0 2024-11-11T12:41:47,320 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#13 average throughput is 0.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:47,321 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/13754117f51946ffbf487e065e75ade4 is 50, key is test_row_0/B:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:47,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741851_1027 (size=12001) 2024-11-11T12:41:47,327 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/595810c80a84428888e00893f35cc2db 2024-11-11T12:41:47,332 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#14 average throughput is 0.82 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:47,333 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e43491779e7c45b18fd160a121b47c67 is 50, key is test_row_0/A:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:47,336 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741852_1028 (size=12139) 2024-11-11T12:41:47,350 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/13754117f51946ffbf487e065e75ade4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/13754117f51946ffbf487e065e75ade4 2024-11-11T12:41:47,367 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/eec19bb8cf584693a2aa3ad7dac623cf is 50, key is test_row_0/B:col10/1731328906173/Put/seqid=0 2024-11-11T12:41:47,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:47,372 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
as already flushing 2024-11-11T12:41:47,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741853_1029 (size=12139) 2024-11-11T12:41:47,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741854_1030 (size=12001) 2024-11-11T12:41:47,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328967394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,413 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/eec19bb8cf584693a2aa3ad7dac623cf 2024-11-11T12:41:47,413 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328967396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328967399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,415 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328967399, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328967394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,417 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 13754117f51946ffbf487e065e75ade4(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:47,417 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:47,417 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=12, startTime=1731328907236; duration=0sec 2024-11-11T12:41:47,419 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:47,419 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:47,419 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:47,423 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:47,423 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:47,424 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
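The burst of RegionTooBusyException warnings above comes from HRegion.checkResources rejecting Mutate calls while region 0a6a9f82df0ac9ece8343137343e2f72 is over its 512.0 K blocking memstore limit; the flush (pid=15) and the store compactions are what eventually drain it. As a rough, illustrative sketch only (the TestAcidGuarantees writer threads' real handling is not visible in this log, and the table name, retry budget, and backoff values below are assumptions), a client writer that tolerates these rejections might look like:

    // Illustrative only: back off and retry a put that the region server rejects with
    // RegionTooBusyException ("Over memstore limit"), as seen repeatedly in this log.
    // Depending on client retry settings the exception may also arrive wrapped in a
    // retries-exhausted exception; this sketch ignores that detail.
    import java.io.IOException;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;

    public class BusyRegionRetrySketch {
      static void putWithBackoff(Connection conn, Put put) throws IOException, InterruptedException {
        try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          long backoffMs = 100;                              // assumed initial backoff
          for (int attempt = 0; attempt < 10; attempt++) {   // assumed retry budget
            try {
              table.put(put);                                // may throw RegionTooBusyException
              return;                                        // write accepted
            } catch (RegionTooBusyException busy) {
              // Memstore is over its blocking limit; wait for the flush/compaction to catch up.
              Thread.sleep(backoffMs);
              backoffMs = Math.min(backoffMs * 2, 5_000);
            }
          }
          throw new IOException("region still too busy after retries");
        }
      }
    }
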
2024-11-11T12:41:47,424 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=46.9 K 2024-11-11T12:41:47,425 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d9aea97c714c4f2b80e46314203afec3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=14, earliestPutTs=1731328904228 2024-11-11T12:41:47,427 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 540dcfe32bf144478f794580454e5922, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731328904481 2024-11-11T12:41:47,428 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 293e90c3ddd847d7a2405afda7404815, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731328904841 2024-11-11T12:41:47,430 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f0f0394872f40c682e9b01a1a351917, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906065 2024-11-11T12:41:47,455 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/652993a0ac124b73b839181033be81a6 is 50, key is test_row_0/C:col10/1731328906173/Put/seqid=0 2024-11-11T12:41:47,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:47,478 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#17 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:47,480 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/62a3956feea64c3ea367b5a9cab87ae5 is 50, key is test_row_0/C:col10/1731328906065/Put/seqid=0 2024-11-11T12:41:47,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741855_1031 (size=12001) 2024-11-11T12:41:47,529 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/652993a0ac124b73b839181033be81a6 2024-11-11T12:41:47,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328967517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,537 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328967517, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,544 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328967524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,543 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/595810c80a84428888e00893f35cc2db as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db 2024-11-11T12:41:47,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328967520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,546 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328967524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,570 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db, entries=150, sequenceid=89, filesize=11.7 K 2024-11-11T12:41:47,573 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/eec19bb8cf584693a2aa3ad7dac623cf as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf 2024-11-11T12:41:47,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741856_1032 (size=12139) 2024-11-11T12:41:47,585 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf, entries=150, sequenceid=89, filesize=11.7 K 2024-11-11T12:41:47,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/652993a0ac124b73b839181033be81a6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6 2024-11-11T12:41:47,604 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6, entries=150, sequenceid=89, filesize=11.7 K 2024-11-11T12:41:47,609 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] 
regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 0a6a9f82df0ac9ece8343137343e2f72 in 335ms, sequenceid=89, compaction requested=false 2024-11-11T12:41:47,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:47,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:47,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=15}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=15 2024-11-11T12:41:47,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=15 2024-11-11T12:41:47,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=14 2024-11-11T12:41:47,617 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2840 sec 2024-11-11T12:41:47,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=14, table=TestAcidGuarantees in 1.2960 sec 2024-11-11T12:41:47,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:47,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:47,769 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:47,785 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/2ab5dd805fad4a789fb1a43f440514e3 is 50, key is test_row_0/A:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:47,802 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e43491779e7c45b18fd160a121b47c67 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e43491779e7c45b18fd160a121b47c67 2024-11-11T12:41:47,821 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into e43491779e7c45b18fd160a121b47c67(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:47,822 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:47,822 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=12, startTime=1731328907221; duration=0sec 2024-11-11T12:41:47,822 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:47,822 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:47,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741857_1033 (size=9657) 2024-11-11T12:41:47,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/2ab5dd805fad4a789fb1a43f440514e3 2024-11-11T12:41:47,837 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328967825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328967827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,840 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,840 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328967829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328967833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,842 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328967837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,856 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/2cf0fee2544c4cd9b80bb7b5f9428a8c is 50, key is test_row_0/B:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:47,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741858_1034 (size=9657) 2024-11-11T12:41:47,909 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/2cf0fee2544c4cd9b80bb7b5f9428a8c 2024-11-11T12:41:47,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328967943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328967944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328967944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,951 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328967945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:47,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328967945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:47,959 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8e8cebdd81554fc59810bc2971a656b6 is 50, key is test_row_0/C:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:47,997 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/62a3956feea64c3ea367b5a9cab87ae5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/62a3956feea64c3ea367b5a9cab87ae5 2024-11-11T12:41:48,003 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741859_1035 (size=9657) 2024-11-11T12:41:48,019 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 62a3956feea64c3ea367b5a9cab87ae5(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
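Every one of these rejections reports the same figure, "Over memstore limit=512.0 K". In HBase that blocking threshold is the product of hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier, so the test is evidently running with a much smaller flush size than the 128 MB default. A minimal sketch of that arithmetic, assuming a 128 K flush size and the default multiplier of 4 purely to reproduce the 512 K figure (the test's actual settings are not shown in this log):

    // Sketch only: derive the blocking memstore limit from configuration.
    // 128 K flush size and multiplier 4 are assumptions chosen to match the 512 K in the log.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // assumed 128 K
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Writes are rejected with RegionTooBusyException once the region memstore exceeds this.
        System.out.println("blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512 K
      }
    }
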
2024-11-11T12:41:48,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:48,020 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=12, startTime=1731328907237; duration=0sec 2024-11-11T12:41:48,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:48,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:48,156 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328968149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,158 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328968154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328968154, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,163 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328968156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328968165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,407 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=102 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8e8cebdd81554fc59810bc2971a656b6 2024-11-11T12:41:48,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/2ab5dd805fad4a789fb1a43f440514e3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3 2024-11-11T12:41:48,443 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3, entries=100, sequenceid=102, filesize=9.4 K 2024-11-11T12:41:48,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/2cf0fee2544c4cd9b80bb7b5f9428a8c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c 2024-11-11T12:41:48,458 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c, entries=100, sequenceid=102, filesize=9.4 K 2024-11-11T12:41:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=14 2024-11-11T12:41:48,459 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 14 completed 2024-11-11T12:41:48,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8e8cebdd81554fc59810bc2971a656b6 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6 2024-11-11T12:41:48,464 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:48,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees 2024-11-11T12:41:48,465 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-11T12:41:48,468 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:48,470 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=16, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:48,470 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=17, ppid=16, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:48,471 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6, entries=100, sequenceid=102, filesize=9.4 K 2024-11-11T12:41:48,473 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 
0a6a9f82df0ac9ece8343137343e2f72 in 715ms, sequenceid=102, compaction requested=true 2024-11-11T12:41:48,473 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:48,473 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:48,475 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:48,475 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:48,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328968461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,475 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,476 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e43491779e7c45b18fd160a121b47c67, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=33.0 K 2024-11-11T12:41:48,476 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e43491779e7c45b18fd160a121b47c67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906065 2024-11-11T12:41:48,477 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 595810c80a84428888e00893f35cc2db, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328906173 2024-11-11T12:41:48,478 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ab5dd805fad4a789fb1a43f440514e3, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328907751 2024-11-11T12:41:48,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:48,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 
2024-11-11T12:41:48,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:48,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:48,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:48,489 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:48,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:48,490 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:48,490 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-11T12:41:48,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:48,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:48,492 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:48,492 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/13754117f51946ffbf487e065e75ade4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=33.0 K 2024-11-11T12:41:48,493 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 13754117f51946ffbf487e065e75ade4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906065 2024-11-11T12:41:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:48,493 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:48,493 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting eec19bb8cf584693a2aa3ad7dac623cf, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328906173 2024-11-11T12:41:48,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:48,494 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:48,495 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 2cf0fee2544c4cd9b80bb7b5f9428a8c, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328907751 2024-11-11T12:41:48,500 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#21 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:48,501 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/8b41402ddb154874a7f7c262fcfccb94 is 50, key is test_row_0/A:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:48,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328968499, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328968500, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,510 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a67745ec0e534241b253aec1af322d56 is 50, key is test_row_0/A:col10/1731328907834/Put/seqid=0 2024-11-11T12:41:48,510 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328968506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328968509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,519 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#23 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:48,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741860_1036 (size=12241) 2024-11-11T12:41:48,520 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ff2938e9148542d4bfe1c1ac34764dfd is 50, key is test_row_0/B:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:48,538 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/8b41402ddb154874a7f7c262fcfccb94 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8b41402ddb154874a7f7c262fcfccb94 2024-11-11T12:41:48,550 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741861_1037 (size=14491) 2024-11-11T12:41:48,554 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 8b41402ddb154874a7f7c262fcfccb94(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:48,554 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:48,554 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328908473; duration=0sec 2024-11-11T12:41:48,554 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:48,555 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:48,555 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:48,562 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33797 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:48,563 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:48,563 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:48,563 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/62a3956feea64c3ea367b5a9cab87ae5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=33.0 K 2024-11-11T12:41:48,564 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 62a3956feea64c3ea367b5a9cab87ae5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=63, earliestPutTs=1731328906065 2024-11-11T12:41:48,564 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 652993a0ac124b73b839181033be81a6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328906173 2024-11-11T12:41:48,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8e8cebdd81554fc59810bc2971a656b6, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328907751 2024-11-11T12:41:48,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-11T12:41:48,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741862_1038 (size=12241) 2024-11-11T12:41:48,598 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ff2938e9148542d4bfe1c1ac34764dfd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ff2938e9148542d4bfe1c1ac34764dfd 2024-11-11T12:41:48,609 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into ff2938e9148542d4bfe1c1ac34764dfd(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:48,610 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:48,610 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328908489; duration=0sec 2024-11-11T12:41:48,611 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:48,612 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:48,616 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328968611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328968611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,617 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328968615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,619 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#24 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:48,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,620 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/49b2501af9d946d89928dc7b48af7c85 is 50, key is test_row_0/C:col10/1731328907751/Put/seqid=0 2024-11-11T12:41:48,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328968619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,624 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,625 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-11T12:41:48,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:48,625 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:48,625 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,626 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741863_1039 (size=12241) 2024-11-11T12:41:48,672 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/49b2501af9d946d89928dc7b48af7c85 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/49b2501af9d946d89928dc7b48af7c85 2024-11-11T12:41:48,685 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 49b2501af9d946d89928dc7b48af7c85(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:48,686 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:48,686 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328908489; duration=0sec 2024-11-11T12:41:48,686 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:48,686 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-11T12:41:48,788 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-11T12:41:48,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:48,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:48,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,825 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328968822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,827 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328968823, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,829 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328968819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328968828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,950 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-11T12:41:48,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:48,951 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:48,951 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,953 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a67745ec0e534241b253aec1af322d56 2024-11-11T12:41:48,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:48,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:48,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328968989, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:48,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c6809e4020064ef286bc65bd88923d7d is 50, key is test_row_0/B:col10/1731328907834/Put/seqid=0 2024-11-11T12:41:49,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741864_1040 (size=12101) 2024-11-11T12:41:49,056 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c6809e4020064ef286bc65bd88923d7d 2024-11-11T12:41:49,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-11T12:41:49,102 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/60768f30dce74062b5b05a2056ca116d is 50, key is test_row_0/C:col10/1731328907834/Put/seqid=0 2024-11-11T12:41:49,106 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,107 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-11T12:41:49,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
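The RegionTooBusyException entries around this point all originate from HRegion.checkResources (HRegion.java:5067 in these traces): writes to region 0a6a9f82df0ac9ece8343137343e2f72 are rejected while its memstore sits above the blocking limit (the 512.0 K figure in the messages), which HBase derives from the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier. The following is a minimal Java sketch of that back-pressure check; the class, field, and variable names are illustrative, not the actual HRegion internals.

    // Simplified sketch of the memstore back-pressure check that produces the
    // "Over memstore limit=..." RegionTooBusyException entries in this log.
    // Names are illustrative only.
    final class MemStoreBackPressure {
        private final long memstoreFlushSize;   // hbase.hregion.memstore.flush.size
        private final long blockingMultiplier;  // hbase.hregion.memstore.block.multiplier

        MemStoreBackPressure(long memstoreFlushSize, long blockingMultiplier) {
            this.memstoreFlushSize = memstoreFlushSize;
            this.blockingMultiplier = blockingMultiplier;
        }

        /** Rejects the write when the region's memstore has grown past the blocking limit. */
        void checkResources(String regionName, long currentMemstoreSize) throws Exception {
            long blockingLimit = memstoreFlushSize * blockingMultiplier;
            if (currentMemstoreSize > blockingLimit) {
                // The real code throws org.apache.hadoop.hbase.RegionTooBusyException;
                // a plain exception stands in for it in this sketch.
                throw new Exception("Over memstore limit=" + blockingLimit
                    + ", regionName=" + regionName);
            }
        }
    }

Callers are expected to retry after a short backoff, which is why the same callIds reappear in later ipc.CallRunner entries with new deadlines.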
2024-11-11T12:41:49,112 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] handler.RSProcedureHandler(58): pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,112 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=17 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=17 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,141 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328969139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,142 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328969139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328969139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,143 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328969139, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741865_1041 (size=12101) 2024-11-11T12:41:49,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=58.14 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/60768f30dce74062b5b05a2056ca116d 2024-11-11T12:41:49,160 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a67745ec0e534241b253aec1af322d56 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56 2024-11-11T12:41:49,171 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56, entries=200, sequenceid=133, filesize=14.2 K 2024-11-11T12:41:49,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c6809e4020064ef286bc65bd88923d7d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d 2024-11-11T12:41:49,202 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d, entries=150, sequenceid=133, filesize=11.8 K 2024-11-11T12:41:49,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/60768f30dce74062b5b05a2056ca116d as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d 2024-11-11T12:41:49,217 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d, entries=150, sequenceid=133, filesize=11.8 K 2024-11-11T12:41:49,220 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~174.43 KB/178620, heapSize ~457.73 KB/468720, currentSize=26.84 KB/27480 for 0a6a9f82df0ac9ece8343137343e2f72 in 729ms, sequenceid=133, compaction requested=false 2024-11-11T12:41:49,220 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:49,270 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,271 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=17 2024-11-11T12:41:49,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,272 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-11T12:41:49,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:49,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:49,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:49,272 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:49,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:49,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:49,291 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/739b9cb76e93401185049eb82d3d2231 is 50, key is test_row_0/A:col10/1731328908498/Put/seqid=0 2024-11-11T12:41:49,303 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741866_1042 (size=9757) 2024-11-11T12:41:49,304 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/739b9cb76e93401185049eb82d3d2231 2024-11-11T12:41:49,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4c0c913b27db42a9b003bdbf2abb7a03 is 50, key is test_row_0/B:col10/1731328908498/Put/seqid=0 2024-11-11T12:41:49,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741867_1043 (size=9757) 2024-11-11T12:41:49,390 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4c0c913b27db42a9b003bdbf2abb7a03 2024-11-11T12:41:49,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/3e294efcbaa04d3abe887252cb206fc2 is 50, key is test_row_0/C:col10/1731328908498/Put/seqid=0 2024-11-11T12:41:49,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741868_1044 (size=9757) 2024-11-11T12:41:49,420 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=143 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/3e294efcbaa04d3abe887252cb206fc2 2024-11-11T12:41:49,431 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/739b9cb76e93401185049eb82d3d2231 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231 2024-11-11T12:41:49,441 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231, entries=100, sequenceid=143, filesize=9.5 K 2024-11-11T12:41:49,442 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4c0c913b27db42a9b003bdbf2abb7a03 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03 2024-11-11T12:41:49,462 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03, entries=100, sequenceid=143, filesize=9.5 K 2024-11-11T12:41:49,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/3e294efcbaa04d3abe887252cb206fc2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2 2024-11-11T12:41:49,480 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2, entries=100, sequenceid=143, filesize=9.5 K 2024-11-11T12:41:49,481 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 0a6a9f82df0ac9ece8343137343e2f72 in 210ms, sequenceid=143, compaction requested=true 2024-11-11T12:41:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
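The pid=17 entries above, together with the completion entries that follow, record the master-driven flush protocol as it appears in this log: FlushTableProcedure spawns a FlushRegionProcedure, which is dispatched to the region server as a FlushRegionCallable; each dispatch that arrives while the region is already flushing logs "NOT flushing ... as already flushing", fails with java.io.IOException: Unable to complete flush, and is reported back to the master, which re-dispatches until an attempt can run the flush itself (here the third dispatch succeeds). The sketch below is only a simplified illustration of that retry-until-flushable pattern, not HBase's actual procedure framework; all names are illustrative.

    // Simplified illustration of the dispatch-and-retry pattern visible above:
    // the coordinator keeps re-sending the flush work while the region reports
    // that a flush is already in progress. Names are illustrative only.
    import java.util.concurrent.Callable;

    final class FlushRetryLoop {
        interface Region {
            boolean isFlushing();
            void flush() throws Exception;
        }

        /** Re-dispatches the flush until the region is free to run it. */
        static void runFlushProcedure(Region region, long retryDelayMillis) throws Exception {
            Callable<Void> callable = () -> {
                if (region.isFlushing()) {
                    // Mirrors "NOT flushing ... as already flushing" followed by
                    // java.io.IOException: Unable to complete flush in the log.
                    throw new java.io.IOException("Unable to complete flush");
                }
                region.flush();
                return null;
            };
            while (true) {
                try {
                    callable.call();                 // analogous to FlushRegionCallable.doCall()
                    return;                          // "Successfully complete execution of pid=..."
                } catch (java.io.IOException e) {
                    Thread.sleep(retryDelayMillis);  // coordinator re-dispatches after a delay
                }
            }
        }
    }
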
2024-11-11T12:41:49,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-11T12:41:49,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=17 2024-11-11T12:41:49,487 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=17, resume processing ppid=16 2024-11-11T12:41:49,488 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, ppid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0140 sec 2024-11-11T12:41:49,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=16, table=TestAcidGuarantees in 1.0240 sec 2024-11-11T12:41:49,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=16 2024-11-11T12:41:49,575 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 16 completed 2024-11-11T12:41:49,578 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:49,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees 2024-11-11T12:41:49,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:49,581 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:49,582 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=18, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:49,582 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:49,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:49,681 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:49,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:49,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/212d721c1597484fa3c8e3806edfd384 is 50, key is test_row_0/A:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:49,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741869_1045 (size=12151) 2024-11-11T12:41:49,731 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/212d721c1597484fa3c8e3806edfd384 2024-11-11T12:41:49,735 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,736 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:49,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:49,736 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,736 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,738 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328969734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328969734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,738 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328969735, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,739 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328969738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] 
at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,758 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/bb39bcb80b4a4255a39c61b1f0a317ce is 50, key is test_row_0/B:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:49,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741870_1046 (size=12151) 2024-11-11T12:41:49,820 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/bb39bcb80b4a4255a39c61b1f0a317ce 2024-11-11T12:41:49,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328969841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,843 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,844 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328969842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328969841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,845 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:49,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328969842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,854 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/93ef6e858f3747eb9b6d75700ccf25bd is 50, key is test_row_0/C:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:49,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:49,899 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:49,901 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:49,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:49,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:49,904 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,904 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:49,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741871_1047 (size=12151) 2024-11-11T12:41:49,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=156 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/93ef6e858f3747eb9b6d75700ccf25bd 2024-11-11T12:41:49,931 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/212d721c1597484fa3c8e3806edfd384 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384 2024-11-11T12:41:49,941 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384, entries=150, sequenceid=156, filesize=11.9 K 2024-11-11T12:41:49,942 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/bb39bcb80b4a4255a39c61b1f0a317ce as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce 2024-11-11T12:41:49,956 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce, entries=150, sequenceid=156, filesize=11.9 K 2024-11-11T12:41:49,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/93ef6e858f3747eb9b6d75700ccf25bd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd 2024-11-11T12:41:49,969 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd, entries=150, sequenceid=156, filesize=11.9 K 2024-11-11T12:41:49,971 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0a6a9f82df0ac9ece8343137343e2f72 in 291ms, sequenceid=156, compaction requested=true 2024-11-11T12:41:49,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:49,972 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:49,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:49,973 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:49,973 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:49,979 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48640 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:49,979 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:49,979 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
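The repeated RegionTooBusyException entries above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking threshold, which is the configured flush size multiplied by the block multiplier. The small 512 K figure suggests this test run lowers the flush size deliberately. The following is only an illustrative sketch (class name and values are hypothetical, not taken from this run) of how those two settings combine:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical, test-sized values; production defaults are far larger.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);   // flush at ~128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);        // block writes at 4x that
    long blockAt = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // 128 KB * 4 = 512 KB, which would be consistent with the
    // "Over memstore limit=512.0 K" rejections seen in this log.
    System.out.println("Puts are rejected above ~" + blockAt + " bytes of memstore per region");
  }
}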
2024-11-11T12:41:49,979 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8b41402ddb154874a7f7c262fcfccb94, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=47.5 K 2024-11-11T12:41:49,980 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b41402ddb154874a7f7c262fcfccb94, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328906202 2024-11-11T12:41:49,981 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a67745ec0e534241b253aec1af322d56, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1731328907834 2024-11-11T12:41:49,982 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46250 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:49,982 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:49,982 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
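The shortCompactions / longCompactions entries above show ExploringCompactionPolicy automatically picking four eligible store files per family for a minor compaction. For comparison, a compaction of a single column family can also be requested explicitly through the Admin API; this is only a sketch of such a request, not something issued by this test:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Queue a compaction of just the 'A' family, the same store being compacted above.
      admin.compact(TableName.valueOf("TestAcidGuarantees"), Bytes.toBytes("A"));
      // admin.majorCompact(...) would instead rewrite every store file in the family.
    }
  }
}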
2024-11-11T12:41:49,982 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 739b9cb76e93401185049eb82d3d2231, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731328908494 2024-11-11T12:41:49,982 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ff2938e9148542d4bfe1c1ac34764dfd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=45.2 K 2024-11-11T12:41:49,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:49,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:49,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:49,983 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:49,990 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 212d721c1597484fa3c8e3806edfd384, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:49,990 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ff2938e9148542d4bfe1c1ac34764dfd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328906202 2024-11-11T12:41:49,991 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c6809e4020064ef286bc65bd88923d7d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1731328907834 2024-11-11T12:41:49,992 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c0c913b27db42a9b003bdbf2abb7a03, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731328908494 2024-11-11T12:41:49,993 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bb39bcb80b4a4255a39c61b1f0a317ce, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:50,008 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:50,008 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:50,022 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#33 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:50,024 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/2aaf8bfee97641bba6b22aabc2c732ff is 50, key is test_row_0/A:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:50,030 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#34 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:50,031 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/95f43ce6616d4e448bb7826a310ca256 is 50, key is test_row_0/B:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:50,033 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/140f9a56bced4bb89fc29a4a0c01d191 is 50, key is test_row_0/A:col10/1731328909736/Put/seqid=0 2024-11-11T12:41:50,049 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328970046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328970047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328970047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,050 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328970047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328970048, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,059 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,060 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
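On the client side, the RegionTooBusyException reported back through CallRunner above is a retriable error: the HBase client retries it internally according to its retry settings rather than failing the Mutate call outright. A minimal sketch, assuming default client behaviour and using purely illustrative values:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PatientClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values: allow more retries and a longer pause so a temporarily
    // blocked region (as above) has time to flush before the client gives up.
    conf.setInt("hbase.client.retries.number", 15);
    conf.setLong("hbase.client.pause", 200);                 // ms between attempts
    conf.setLong("hbase.client.operation.timeout", 60_000);  // overall cap in ms
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      table.put(new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v")));
    }
  }
}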
2024-11-11T12:41:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,060 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,060 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:50,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
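The pid=19 failures above are the master-side view of a flush procedure: the FlushRegionCallable dispatched to the region server throws "Unable to complete flush ... as already flushing" while MemStoreFlusher is still writing, and the master keeps retrying the remote procedure until it can run. A flush requested through the Admin API, which likely corresponds to the machinery seen here, would look roughly like this (sketch only):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RequestFlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Requests a flush of the whole table; in this log the flush is driven as a
      // master procedure that dispatches FlushRegionCallable to the region server,
      // and it only completes once no other flush of the region is in progress.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}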
2024-11-11T12:41:50,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741872_1048 (size=12527) 2024-11-11T12:41:50,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741874_1050 (size=12151) 2024-11-11T12:41:50,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/140f9a56bced4bb89fc29a4a0c01d191 2024-11-11T12:41:50,081 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/2aaf8bfee97641bba6b22aabc2c732ff as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2aaf8bfee97641bba6b22aabc2c732ff 2024-11-11T12:41:50,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741873_1049 (size=12527) 2024-11-11T12:41:50,095 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 2aaf8bfee97641bba6b22aabc2c732ff(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:50,095 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:50,095 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=12, startTime=1731328909972; duration=0sec 2024-11-11T12:41:50,096 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:50,096 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:50,096 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:50,098 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46250 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:50,098 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:50,098 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:50,099 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/49b2501af9d946d89928dc7b48af7c85, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=45.2 K 2024-11-11T12:41:50,099 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49b2501af9d946d89928dc7b48af7c85, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=102, earliestPutTs=1731328906202 2024-11-11T12:41:50,100 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60768f30dce74062b5b05a2056ca116d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1731328907834 2024-11-11T12:41:50,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/28a039f20feb4230add972ef87868bfd is 50, key is test_row_0/B:col10/1731328909736/Put/seqid=0 2024-11-11T12:41:50,103 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e294efcbaa04d3abe887252cb206fc2, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=143, earliestPutTs=1731328908494 2024-11-11T12:41:50,104 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 93ef6e858f3747eb9b6d75700ccf25bd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:50,105 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/95f43ce6616d4e448bb7826a310ca256 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/95f43ce6616d4e448bb7826a310ca256 2024-11-11T12:41:50,125 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 95f43ce6616d4e448bb7826a310ca256(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
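Both compactions above finish in well under a second, and the PressureAwareThroughputController entries earlier in the run report a total limit of 50.00 MB/second, so throttling never bites here. Those bounds are configurable; the sketch below uses the standard pressure-aware controller's bound keys with hypothetical values (bounds are in bytes per second):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical values: widen the band the pressure-aware controller operates in.
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 100L * 1024 * 1024);  // 100 MB/s
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 200L * 1024 * 1024); // 200 MB/s
    System.out.println("Compaction throughput band: "
        + conf.getLong("hbase.hstore.compaction.throughput.lower.bound", 0L) + " to "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", 0L) + " bytes/s");
  }
}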
2024-11-11T12:41:50,125 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:50,125 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=12, startTime=1731328909973; duration=0sec 2024-11-11T12:41:50,125 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:50,125 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:50,127 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#37 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:50,128 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/56740a23db4b4adcb627e76f2aa29208 is 50, key is test_row_0/C:col10/1731328909677/Put/seqid=0 2024-11-11T12:41:50,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741875_1051 (size=12151) 2024-11-11T12:41:50,138 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/28a039f20feb4230add972ef87868bfd 2024-11-11T12:41:50,153 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328970152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,160 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/5d5a93697497481dbda18c20b60a18b9 is 50, key is test_row_0/C:col10/1731328909736/Put/seqid=0 2024-11-11T12:41:50,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741876_1052 (size=12527) 2024-11-11T12:41:50,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:50,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741877_1053 (size=12151) 2024-11-11T12:41:50,203 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=180 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/5d5a93697497481dbda18c20b60a18b9 2024-11-11T12:41:50,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/140f9a56bced4bb89fc29a4a0c01d191 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191 2024-11-11T12:41:50,214 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,215 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,215 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
as already flushing 2024-11-11T12:41:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,216 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,230 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191, entries=150, sequenceid=180, filesize=11.9 K 2024-11-11T12:41:50,231 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/28a039f20feb4230add972ef87868bfd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd 2024-11-11T12:41:50,241 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd, entries=150, sequenceid=180, filesize=11.9 K 2024-11-11T12:41:50,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/5d5a93697497481dbda18c20b60a18b9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9 2024-11-11T12:41:50,250 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9, entries=150, sequenceid=180, filesize=11.9 K 2024-11-11T12:41:50,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 0a6a9f82df0ac9ece8343137343e2f72 in 245ms, sequenceid=180, compaction requested=false 2024-11-11T12:41:50,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:50,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:50,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:41:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,358 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:50,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:50,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4e6ab55b40ee4bc0a845e72de17ea408 is 50, key is test_row_0/A:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:50,370 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:50,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:50,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,395 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741878_1054 (size=14541) 2024-11-11T12:41:50,399 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328970393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,401 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328970393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,402 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328970395, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328970398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4e6ab55b40ee4bc0a845e72de17ea408 2024-11-11T12:41:50,418 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328970418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,478 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3bd0bfec634a4a50a7f934d762555bb9 is 50, key is test_row_0/B:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:50,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741879_1055 (size=12151) 2024-11-11T12:41:50,508 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3bd0bfec634a4a50a7f934d762555bb9 2024-11-11T12:41:50,508 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,507 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328970504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328970501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328970510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,517 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328970514, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,524 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1335f177bd0a4c97a9f3bf1ec983e2ba is 50, key is test_row_0/C:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:50,529 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,529 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,530 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328970527, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:50,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,530 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741880_1056 (size=12151) 2024-11-11T12:41:50,589 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/56740a23db4b4adcb627e76f2aa29208 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/56740a23db4b4adcb627e76f2aa29208 2024-11-11T12:41:50,602 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 56740a23db4b4adcb627e76f2aa29208(size=12.2 K), total size for store is 24.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:50,602 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:50,602 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=12, startTime=1731328909983; duration=0sec 2024-11-11T12:41:50,603 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:50,603 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:50,683 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:50,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:50,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:50,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,687 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:50,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,714 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,715 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328970713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328970713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,716 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328970713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,722 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328970720, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:50,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328970737, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,840 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:50,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,841 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:50,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:50,846 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:50,955 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=195 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1335f177bd0a4c97a9f3bf1ec983e2ba 2024-11-11T12:41:50,965 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/4e6ab55b40ee4bc0a845e72de17ea408 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408 2024-11-11T12:41:50,975 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408, entries=200, sequenceid=195, filesize=14.2 K 2024-11-11T12:41:50,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3bd0bfec634a4a50a7f934d762555bb9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9 2024-11-11T12:41:50,985 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9, entries=150, 
sequenceid=195, filesize=11.9 K 2024-11-11T12:41:50,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1335f177bd0a4c97a9f3bf1ec983e2ba as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba 2024-11-11T12:41:50,999 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba, entries=150, sequenceid=195, filesize=11.9 K 2024-11-11T12:41:51,000 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,003 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 0a6a9f82df0ac9ece8343137343e2f72 in 646ms, sequenceid=195, compaction requested=true 2024-11-11T12:41:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:51,003 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:51,003 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:51,003 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:51,003 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:51,003 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:51,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:51,004 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:51,005 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 39219 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:51,005 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:51,005 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,006 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2aaf8bfee97641bba6b22aabc2c732ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=38.3 K 2024-11-11T12:41:51,006 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:51,006 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:51,006 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:51,006 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/95f43ce6616d4e448bb7826a310ca256, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.0 K 2024-11-11T12:41:51,007 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aaf8bfee97641bba6b22aabc2c732ff, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:51,007 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 95f43ce6616d4e448bb7826a310ca256, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:51,007 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 140f9a56bced4bb89fc29a4a0c01d191, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1731328909733 2024-11-11T12:41:51,007 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 28a039f20feb4230add972ef87868bfd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1731328909733 2024-11-11T12:41:51,008 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e6ab55b40ee4bc0a845e72de17ea408, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:51,008 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3bd0bfec634a4a50a7f934d762555bb9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:51,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:51,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:41:51,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:51,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:51,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,026 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 
2024-11-11T12:41:51,027 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,036 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#42 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:51,037 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/f29a09338e1141b0a415d0d0de27adc2 is 50, key is test_row_0/B:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:51,046 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#43 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:51,048 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/dfb70cfdf22047c3927b4e39a5725a61 is 50, key is test_row_0/A:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:51,050 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/80dbbb17ff594096b0437a3226934c4e is 50, key is test_row_0/A:col10/1731328910392/Put/seqid=0 2024-11-11T12:41:51,057 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328971050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328971051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,060 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328971054, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328971060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,063 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328971061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741881_1057 (size=12629) 2024-11-11T12:41:51,095 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/f29a09338e1141b0a415d0d0de27adc2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f29a09338e1141b0a415d0d0de27adc2 2024-11-11T12:41:51,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741883_1059 (size=12629) 2024-11-11T12:41:51,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741882_1058 (size=14541) 2024-11-11T12:41:51,127 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into f29a09338e1141b0a415d0d0de27adc2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:51,127 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:51,127 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328911003; duration=0sec 2024-11-11T12:41:51,127 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:51,127 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:51,127 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:51,129 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36829 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:51,129 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:51,129 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,130 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/56740a23db4b4adcb627e76f2aa29208, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.0 K 2024-11-11T12:41:51,130 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 56740a23db4b4adcb627e76f2aa29208, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=156, earliestPutTs=1731328909669 2024-11-11T12:41:51,131 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 5d5a93697497481dbda18c20b60a18b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=180, earliestPutTs=1731328909733 2024-11-11T12:41:51,132 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1335f177bd0a4c97a9f3bf1ec983e2ba, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:51,152 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
0a6a9f82df0ac9ece8343137343e2f72#C#compaction#45 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:51,153 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/46439a2f45d2460eb751aa81a96dd3c3 is 50, key is test_row_0/C:col10/1731328910044/Put/seqid=0 2024-11-11T12:41:51,156 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:51,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,157 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,158 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,162 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,173 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328971163, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328971159, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328971162, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,184 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328971177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,185 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328971176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741884_1060 (size=12629) 2024-11-11T12:41:51,198 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/46439a2f45d2460eb751aa81a96dd3c3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/46439a2f45d2460eb751aa81a96dd3c3 2024-11-11T12:41:51,215 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 46439a2f45d2460eb751aa81a96dd3c3(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:51,215 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:51,215 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328911003; duration=0sec 2024-11-11T12:41:51,215 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:51,215 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:51,311 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,312 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:51,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:51,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,312 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,312 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:51,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328971377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328971384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328971387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328971387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,389 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328971389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,466 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,467 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,467 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/80dbbb17ff594096b0437a3226934c4e 2024-11-11T12:41:51,528 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/dfb70cfdf22047c3927b4e39a5725a61 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/dfb70cfdf22047c3927b4e39a5725a61 2024-11-11T12:41:51,539 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into dfb70cfdf22047c3927b4e39a5725a61(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:51,541 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:51,541 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328911003; duration=0sec 2024-11-11T12:41:51,541 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:51,541 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:51,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/0e87f5c58036436d98b0e690684a9fe4 is 50, key is test_row_0/B:col10/1731328910392/Put/seqid=0 2024-11-11T12:41:51,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741885_1061 (size=12151) 2024-11-11T12:41:51,591 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/0e87f5c58036436d98b0e690684a9fe4 2024-11-11T12:41:51,606 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a74ff1da92ea4ec4be882435eec03c35 is 50, key is test_row_0/C:col10/1731328910392/Put/seqid=0 2024-11-11T12:41:51,625 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,631 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:51,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:51,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
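All of the RegionTooBusyException entries in this log report the same threshold, "Over memstore limit=512.0 K". In HBase that blocking limit is the per-region flush size multiplied by hbase.hregion.memstore.block.multiplier; HRegion.checkResources rejects writes once the region's memstore exceeds it. The exact values this test harness configures are not visible in the log, so the numbers in the sketch below are assumptions picked only because they reproduce the 512 K figure.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemStoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Assumed values: a 128 K flush trigger with the default multiplier of 4
    // yields the 512 K blocking limit seen in the RegionTooBusyException messages.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("Writes block above " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}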
2024-11-11T12:41:51,634 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] handler.RSProcedureHandler(58): pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=19 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=19 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:51,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741886_1062 (size=12151) 2024-11-11T12:41:51,652 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=221 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a74ff1da92ea4ec4be882435eec03c35 2024-11-11T12:41:51,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/80dbbb17ff594096b0437a3226934c4e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e 2024-11-11T12:41:51,677 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e, entries=200, sequenceid=221, filesize=14.2 K 2024-11-11T12:41:51,680 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/0e87f5c58036436d98b0e690684a9fe4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4 2024-11-11T12:41:51,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too 
busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328971681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:51,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4, entries=150, sequenceid=221, filesize=11.9 K 2024-11-11T12:41:51,691 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a74ff1da92ea4ec4be882435eec03c35 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35 2024-11-11T12:41:51,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328971690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,697 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328971692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:51,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328971693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35, entries=150, sequenceid=221, filesize=11.9 K 2024-11-11T12:41:51,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328971691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,700 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 0a6a9f82df0ac9ece8343137343e2f72 in 674ms, sequenceid=221, compaction requested=false 2024-11-11T12:41:51,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:51,788 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:51,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=19 2024-11-11T12:41:51,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
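The Mutate calls rejected with RegionTooBusyException above are throttling rather than failure: RegionTooBusyException is a retryable IOException, so the client keeps retrying each put with backoff until the region drains below the blocking limit or the call's deadline (the "deadline=..." values, about one minute after each call) expires. A minimal writer doing these puts might look like the sketch below; the row key, column family, and qualifier come from the log, while the payload and class name are illustrative.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionWriter {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      // Row key, family and qualifier mirror the log ("test_row_0", store A, col10);
      // the value is an illustrative placeholder.
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      try {
        // The client library retries internally while the region answers "too busy".
        table.put(put);
      } catch (IOException e) {
        // Only reached if the region stays over its memstore blocking limit for the
        // whole retry window; the root cause is the RegionTooBusyException logged above.
        System.err.println("Write gave up: " + e);
      }
    }
  }
}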
2024-11-11T12:41:51,790 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:51,791 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:51,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3e0644706af14c63bc575da469286f20 is 50, key is test_row_0/A:col10/1731328911051/Put/seqid=0 2024-11-11T12:41:51,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741887_1063 (size=12151) 2024-11-11T12:41:52,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:52,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,218 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328972213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328972215, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,219 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328972216, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,220 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328972217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328972220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,273 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3e0644706af14c63bc575da469286f20 2024-11-11T12:41:52,303 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/52b4cf83763349f9828582120cd737d3 is 50, key is test_row_0/B:col10/1731328911051/Put/seqid=0 2024-11-11T12:41:52,321 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328972321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,322 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328972321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,325 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328972321, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328972323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328972323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741888_1064 (size=12151) 2024-11-11T12:41:52,528 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328972525, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328972526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,531 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328972528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328972530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,532 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328972530, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,734 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/52b4cf83763349f9828582120cd737d3 2024-11-11T12:41:52,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/191ab731b88746cea8de41ba7f720df1 is 50, key is test_row_0/C:col10/1731328911051/Put/seqid=0 2024-11-11T12:41:52,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741889_1065 (size=12151) 2024-11-11T12:41:52,763 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/191ab731b88746cea8de41ba7f720df1 2024-11-11T12:41:52,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3e0644706af14c63bc575da469286f20 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20 2024-11-11T12:41:52,801 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20, entries=150, sequenceid=235, filesize=11.9 K 2024-11-11T12:41:52,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/52b4cf83763349f9828582120cd737d3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3 2024-11-11T12:41:52,809 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3, entries=150, sequenceid=235, filesize=11.9 K 2024-11-11T12:41:52,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/191ab731b88746cea8de41ba7f720df1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1 2024-11-11T12:41:52,820 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1, entries=150, sequenceid=235, filesize=11.9 K 2024-11-11T12:41:52,827 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 0a6a9f82df0ac9ece8343137343e2f72 in 1036ms, sequenceid=235, compaction requested=true 2024-11-11T12:41:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
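The entries above show RPC handler threads rejecting Mutate calls with RegionTooBusyException ("Over memstore limit=512.0 K") for region 0a6a9f82df0ac9ece8343137343e2f72 while MemStoreFlusher drains stores A, B and C to new HFiles. The blocking threshold is governed by hbase.hregion.memstore.flush.size together with hbase.hregion.memstore.block.multiplier, so a writer that trips it simply has to back off until the flush finishes. The following sketch is illustrative only and is not part of this log or of TestAcidGuarantees: it assumes an HBase 2.x client on the classpath and catches RegionTooBusyException directly, whereas the real client normally retries internally and may surface the failure wrapped in a retries-exhausted exception. The table name, row key, family and qualifier are taken from the entries above; the class name and backoff values are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            // Same shape of write as the test: row test_row_0, family A, qualifier col10.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;  // hypothetical starting backoff, doubled on each rejection
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);  // the stock client also retries internally
                    break;
                } catch (RegionTooBusyException e) {
                    // Region is above its memstore blocking limit; wait for the flush to complete.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;
                }
            }
        }
    }
}

In this test the 512.0 K limit is deliberately tiny so that writers collide with flushes; on a production cluster the same symptom usually points at flushes that cannot keep up with the write rate rather than at client code.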
2024-11-11T12:41:52,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-11-11T12:41:52,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-11-11T12:41:52,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=19, resume processing ppid=18 2024-11-11T12:41:52,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2470 sec 2024-11-11T12:41:52,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:52,835 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=18, table=TestAcidGuarantees in 3.2550 sec 2024-11-11T12:41:52,835 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:52,836 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:52,844 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e26410145d4f41d698f82ae7bb8e2067 is 50, key is test_row_0/A:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:52,850 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328972844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328972848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,857 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328972850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,857 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328972851, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,858 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328972853, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741890_1066 (size=14741) 2024-11-11T12:41:52,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e26410145d4f41d698f82ae7bb8e2067 2024-11-11T12:41:52,899 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fef0ae4cd925473f9ccf12510a913d13 is 50, key is test_row_0/B:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:52,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741891_1067 (size=12301) 2024-11-11T12:41:52,907 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fef0ae4cd925473f9ccf12510a913d13 2024-11-11T12:41:52,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8c9a71df51c34a968069e3b2d41d19a5 is 50, key is test_row_0/C:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:52,930 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741892_1068 (size=12301) 2024-11-11T12:41:52,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328972953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328972959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,961 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328972959, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328972960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:52,962 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:52,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328972961, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,159 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328973155, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328973164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,172 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328973169, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328973165, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,176 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328973172, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,331 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8c9a71df51c34a968069e3b2d41d19a5 2024-11-11T12:41:53,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e26410145d4f41d698f82ae7bb8e2067 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067 2024-11-11T12:41:53,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067, entries=200, sequenceid=261, filesize=14.4 K 2024-11-11T12:41:53,366 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/fef0ae4cd925473f9ccf12510a913d13 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13 2024-11-11T12:41:53,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13, entries=150, sequenceid=261, filesize=12.0 K 2024-11-11T12:41:53,390 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8c9a71df51c34a968069e3b2d41d19a5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5 2024-11-11T12:41:53,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5, entries=150, sequenceid=261, filesize=12.0 K 2024-11-11T12:41:53,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for 0a6a9f82df0ac9ece8343137343e2f72 in 568ms, sequenceid=261, compaction requested=true 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:53,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:53,404 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:53,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:53,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 54062 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:53,407 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:53,407 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:53,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:53,407 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
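At this point the flush has left a fourth HFile in each of stores A, B and C (sequenceid=261), so MemStoreFlusher queues compaction requests and ExploringCompactionPolicy selects all four files of A and of B for minor compaction. Outside of a test, the same flush-then-compact cycle can be driven by hand through the Admin API. The snippet below is a rough sketch under stated assumptions, not code from this test: it assumes an HBase 2.x Admin interface, the column-family name "A" comes from the log, and the class name and 500 ms polling interval are invented for the example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class ManualFlushCompactExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Force the memstores to disk, comparable to the FlushTableProcedure seen above.
            admin.flush(table);
            // Ask for a (minor) compaction of store A; the region server picks the files.
            admin.compact(table, Bytes.toBytes("A"));
            // Poll until the server reports no compaction in progress for the table.
            while (admin.getCompactionState(table) != CompactionState.NONE) {
                Thread.sleep(500);
            }
        }
    }
}

Whether the request ends up minor or major is still decided server-side by the compaction policy, exactly as in the selection logged here.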
2024-11-11T12:41:53,407 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:53,407 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f29a09338e1141b0a415d0d0de27adc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=48.1 K 2024-11-11T12:41:53,407 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/dfb70cfdf22047c3927b4e39a5725a61, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=52.8 K 2024-11-11T12:41:53,408 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f29a09338e1141b0a415d0d0de27adc2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:53,408 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0e87f5c58036436d98b0e690684a9fe4, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731328910392 2024-11-11T12:41:53,408 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting dfb70cfdf22047c3927b4e39a5725a61, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:53,409 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80dbbb17ff594096b0437a3226934c4e, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731328910389 2024-11-11T12:41:53,409 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 52b4cf83763349f9828582120cd737d3, keycount=150, 
bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328911051 2024-11-11T12:41:53,410 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fef0ae4cd925473f9ccf12510a913d13, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:53,410 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e0644706af14c63bc575da469286f20, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328911051 2024-11-11T12:41:53,410 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e26410145d4f41d698f82ae7bb8e2067, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:53,437 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#54 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:53,438 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f3a065dd542c4124949e23ee25867a0e is 50, key is test_row_0/A:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:53,452 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#55 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:53,453 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4dfd418c85ad4ad0a059a184ad233d21 is 50, key is test_row_0/B:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:53,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:53,483 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:53,484 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:53,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741893_1069 (size=12915) 2024-11-11T12:41:53,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741894_1070 (size=12915) 2024-11-11T12:41:53,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/6aaf745770d142ffa3130539a2d9f17a is 50, key is test_row_0/A:col10/1731328913479/Put/seqid=0 2024-11-11T12:41:53,504 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f3a065dd542c4124949e23ee25867a0e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f3a065dd542c4124949e23ee25867a0e 2024-11-11T12:41:53,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741895_1071 (size=12301) 2024-11-11T12:41:53,518 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/6aaf745770d142ffa3130539a2d9f17a 2024-11-11T12:41:53,527 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/097207f56fa24fd7a42aa7a1fdd34450 is 50, key is test_row_0/B:col10/1731328913479/Put/seqid=0 2024-11-11T12:41:53,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741896_1072 (size=12301) 2024-11-11T12:41:53,531 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/097207f56fa24fd7a42aa7a1fdd34450 2024-11-11T12:41:53,533 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into f3a065dd542c4124949e23ee25867a0e(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:53,533 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:53,533 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=12, startTime=1731328913404; duration=0sec 2024-11-11T12:41:53,533 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:53,533 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:53,533 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:53,535 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49232 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:53,536 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:53,536 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
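[editor's note] The HFileWriterImpl lines above print cell keys such as test_row_0/A:col10/1731328913479/Put/seqid=0, i.e. row, column family, qualifier, timestamp and type. Purely as an illustration of the kind of client write that yields cells of that shape (the connection setup and the literal value are assumptions, not taken from this log), a put might look like the following.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical client write producing a cell keyed like
// test_row_0/A:col10/<timestamp>/Put, as seen in the flush and compaction logs.
public final class PutSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      // Column family "A", qualifier "col10"; the value here is arbitrary.
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      table.put(put);
    }
  }
}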
2024-11-11T12:41:53,536 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/46439a2f45d2460eb751aa81a96dd3c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=48.1 K 2024-11-11T12:41:53,537 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46439a2f45d2460eb751aa81a96dd3c3, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=195, earliestPutTs=1731328910028 2024-11-11T12:41:53,538 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a74ff1da92ea4ec4be882435eec03c35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=221, earliestPutTs=1731328910392 2024-11-11T12:41:53,538 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 191ab731b88746cea8de41ba7f720df1, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328911051 2024-11-11T12:41:53,539 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c9a71df51c34a968069e3b2d41d19a5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:53,549 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e5b75e23d78b4ef6945c41271415ea99 is 50, key is test_row_0/C:col10/1731328913479/Put/seqid=0 2024-11-11T12:41:53,550 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#59 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:53,551 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/ef821e9b4cca4d27974cda9a5e0d097f is 50, key is test_row_0/C:col10/1731328912213/Put/seqid=0 2024-11-11T12:41:53,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741898_1074 (size=12915) 2024-11-11T12:41:53,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741897_1073 (size=12301) 2024-11-11T12:41:53,584 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328973582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328973583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328973584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328973585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328973585, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=18 2024-11-11T12:41:53,691 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 18 completed 2024-11-11T12:41:53,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328973690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,693 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,694 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328973692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328973692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,695 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328973692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees 2024-11-11T12:41:53,695 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328973694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,697 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:53,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:53,700 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=20, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:53,700 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:53,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:53,856 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,857 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-11T12:41:53,857 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
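[editor's note] The repeated RegionTooBusyException warnings above are write backpressure: mutations are rejected while the region's memstore is over its blocking limit (logged here as 512.0 K). That limit is the memstore flush size multiplied by the block multiplier, and the small value suggests the test lowers the flush size well below its default. The snippet below only sketches how such a limit could be configured programmatically; the 128 KB flush size and multiplier of 4 are assumptions inferred from the 512 K figure, not values read from this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch: how a 512 K memstore blocking limit could arise from configuration.
// The concrete numbers are assumptions; only the property names are standard.
public final class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a store once its memstore reaches 128 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Block new writes once the memstore reaches flush.size * multiplier.
    conf.setLong("hbase.hregion.memstore.block.multiplier", 4L);
    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0L)
        * conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
    System.out.println("Blocking limit: " + blockingLimit + " bytes"); // 524288 = 512 K
  }
}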
2024-11-11T12:41:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:53,858 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:53,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
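[editor's note] RegionTooBusyException is a transient, retriable condition: the region accepts writes again once the in-flight flushes and compactions shrink the memstore. The stock HBase client already retries such failures internally, so the loop below is only a hand-rolled sketch of that behaviour for illustration; the attempt count and backoff values are arbitrary assumptions.

import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;

// Illustrative retry loop for writes rejected with RegionTooBusyException.
// The normal HBase client performs comparable retries on its own; the
// attempt count and backoff below are arbitrary.
public final class BusyRegionRetrySketch {

  static void putWithRetry(Table table, Put put) throws Exception {
    int maxAttempts = 5;              // assumption, not from the log
    long backoffMillis = 200;         // assumption, not from the log
    for (int attempt = 1; attempt <= maxAttempts; attempt++) {
      try {
        table.put(put);
        return;                       // write accepted
      } catch (RegionTooBusyException busy) {
        if (attempt == maxAttempts) {
          throw busy;                 // give up after the last attempt
        }
        Thread.sleep(backoffMillis);  // wait for flushes/compactions to catch up
        backoffMillis *= 2;           // simple exponential backoff
      }
    }
  }
}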
2024-11-11T12:41:53,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:53,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328973898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328973898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,900 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328973899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,902 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328973902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,907 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:53,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328973906, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:53,909 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4dfd418c85ad4ad0a059a184ad233d21 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4dfd418c85ad4ad0a059a184ad233d21 2024-11-11T12:41:53,919 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 4dfd418c85ad4ad0a059a184ad233d21(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:53,920 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:53,920 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=12, startTime=1731328913404; duration=0sec 2024-11-11T12:41:53,920 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:53,920 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:53,978 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/ef821e9b4cca4d27974cda9a5e0d097f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ef821e9b4cca4d27974cda9a5e0d097f 2024-11-11T12:41:53,978 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=274 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e5b75e23d78b4ef6945c41271415ea99 2024-11-11T12:41:53,993 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into ef821e9b4cca4d27974cda9a5e0d097f(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:53,993 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:53,994 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=12, startTime=1731328913404; duration=0sec 2024-11-11T12:41:53,994 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:53,994 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:53,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/6aaf745770d142ffa3130539a2d9f17a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a 2024-11-11T12:41:54,002 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a, entries=150, sequenceid=274, filesize=12.0 K 2024-11-11T12:41:54,002 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:54,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/097207f56fa24fd7a42aa7a1fdd34450 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450 2024-11-11T12:41:54,012 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,013 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-11T12:41:54,014 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450, entries=150, sequenceid=274, filesize=12.0 K 2024-11-11T12:41:54,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:54,016 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e5b75e23d78b4ef6945c41271415ea99 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99 2024-11-11T12:41:54,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:54,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:54,017 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] handler.RSProcedureHandler(58): pid=21 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:54,017 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=21 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:41:54,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=21 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:54,049 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99, entries=150, sequenceid=274, filesize=12.0 K 2024-11-11T12:41:54,060 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 0a6a9f82df0ac9ece8343137343e2f72 in 577ms, sequenceid=274, compaction requested=false 2024-11-11T12:41:54,060 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:54,169 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,170 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=21 2024-11-11T12:41:54,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:54,170 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:54,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:54,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f0c8f27e57d3414980f6d1da4c25298e is 50, key is test_row_0/A:col10/1731328913583/Put/seqid=0 2024-11-11T12:41:54,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741899_1075 (size=12301) 2024-11-11T12:41:54,205 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:54,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:54,222 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328974219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328974219, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,223 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328974220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,227 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328974222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,228 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328974222, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:54,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328974326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,329 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328974328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328974329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328974330, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328974331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,534 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328974531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328974533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,535 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328974533, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,535 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328974534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,538 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328974538, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,595 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f0c8f27e57d3414980f6d1da4c25298e 2024-11-11T12:41:54,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3fdd73c2ec24423486a8794eaf01016b is 50, key is test_row_0/B:col10/1731328913583/Put/seqid=0 2024-11-11T12:41:54,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741900_1076 (size=12301) 2024-11-11T12:41:54,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:54,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328974836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,838 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328974837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,841 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328974839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328974842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:54,845 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:54,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328974842, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,018 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3fdd73c2ec24423486a8794eaf01016b 2024-11-11T12:41:55,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a01863d085e645be9dc02e3612c9bb4b is 50, key is test_row_0/C:col10/1731328913583/Put/seqid=0 2024-11-11T12:41:55,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741901_1077 (size=12301) 2024-11-11T12:41:55,073 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a01863d085e645be9dc02e3612c9bb4b 2024-11-11T12:41:55,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f0c8f27e57d3414980f6d1da4c25298e as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e 2024-11-11T12:41:55,095 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e, entries=150, sequenceid=300, filesize=12.0 K 2024-11-11T12:41:55,096 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3fdd73c2ec24423486a8794eaf01016b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b 2024-11-11T12:41:55,110 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b, entries=150, sequenceid=300, filesize=12.0 K 2024-11-11T12:41:55,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/a01863d085e645be9dc02e3612c9bb4b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b 2024-11-11T12:41:55,119 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b, entries=150, sequenceid=300, filesize=12.0 K 2024-11-11T12:41:55,121 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 0a6a9f82df0ac9ece8343137343e2f72 in 951ms, sequenceid=300, compaction requested=true 2024-11-11T12:41:55,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:55,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:55,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-11-11T12:41:55,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-11-11T12:41:55,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=21, resume processing ppid=20 2024-11-11T12:41:55,125 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4230 sec 2024-11-11T12:41:55,128 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=20, table=TestAcidGuarantees in 1.4320 sec 2024-11-11T12:41:55,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:55,351 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:55,351 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,359 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a4814220e7ac4861bc502d3d51723e7a is 50, key is test_row_0/A:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,373 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328975369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328975370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328975371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,377 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741902_1078 (size=12301) 2024-11-11T12:41:55,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328975373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a4814220e7ac4861bc502d3d51723e7a 2024-11-11T12:41:55,379 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328975373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/5cee728065e14db18d61087e74327ca7 is 50, key is test_row_0/B:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741903_1079 (size=12301) 2024-11-11T12:41:55,414 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/5cee728065e14db18d61087e74327ca7 2024-11-11T12:41:55,445 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1e01a4ae3de54e56ba6bcd9dd41e25ec is 50, key is test_row_0/C:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328975475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328975475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328975475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741904_1080 (size=12301) 2024-11-11T12:41:55,480 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=316 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1e01a4ae3de54e56ba6bcd9dd41e25ec 2024-11-11T12:41:55,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328975480, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,482 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328975481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,490 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/a4814220e7ac4861bc502d3d51723e7a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a 2024-11-11T12:41:55,499 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a, entries=150, sequenceid=316, filesize=12.0 K 2024-11-11T12:41:55,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/5cee728065e14db18d61087e74327ca7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7 2024-11-11T12:41:55,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7, entries=150, sequenceid=316, filesize=12.0 K 2024-11-11T12:41:55,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1e01a4ae3de54e56ba6bcd9dd41e25ec as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec 2024-11-11T12:41:55,518 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec, entries=150, sequenceid=316, filesize=12.0 K 2024-11-11T12:41:55,519 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=114.05 
KB/116790 for 0a6a9f82df0ac9ece8343137343e2f72 in 169ms, sequenceid=316, compaction requested=true 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:55,520 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:55,520 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:41:55,520 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:55,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:55,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:55,523 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
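The entries above show client Mutate RPCs being rejected with RegionTooBusyException ("Over memstore limit=512.0 K") while MemStoreFlusher.0 drains the same region. The stock HBase client normally retries this exception internally before it ever reaches application code; the following is only a minimal, hand-rolled sketch of the same back-off idea at the application level. The class name, row key, value, and retry/back-off numbers are illustrative and not taken from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class TooBusyRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          long backoffMs = 100;
          for (int attempt = 1; ; attempt++) {
            try {
              // Rejected server-side with RegionTooBusyException while the region
              // sits over its memstore blocking limit, as in the log above.
              // Note: with the default hbase.client.retries.number the client retries
              // internally, and the failure may surface wrapped in a retries-exhausted
              // exception instead of reaching this catch block directly.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) throw e;   // give up after a few attempts
              Thread.sleep(backoffMs);     // wait for the flush to drain the memstore
              backoffMs *= 2;              // exponential back-off
            }
          }
        }
      }
    }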
2024-11-11T12:41:55,523 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f3a065dd542c4124949e23ee25867a0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=48.7 K 2024-11-11T12:41:55,525 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:55,525 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:55,525 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:55,525 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4dfd418c85ad4ad0a059a184ad233d21, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=48.7 K 2024-11-11T12:41:55,525 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f3a065dd542c4124949e23ee25867a0e, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:55,526 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4dfd418c85ad4ad0a059a184ad233d21, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:55,526 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6aaf745770d142ffa3130539a2d9f17a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, 
earliestPutTs=1731328912846 2024-11-11T12:41:55,526 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 097207f56fa24fd7a42aa7a1fdd34450, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731328912846 2024-11-11T12:41:55,527 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0c8f27e57d3414980f6d1da4c25298e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731328913548 2024-11-11T12:41:55,527 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3fdd73c2ec24423486a8794eaf01016b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731328913548 2024-11-11T12:41:55,527 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a4814220e7ac4861bc502d3d51723e7a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:55,528 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 5cee728065e14db18d61087e74327ca7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:55,555 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#67 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:55,555 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#66 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:55,556 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/67dbab01b633428b9496322b66002bbe is 50, key is test_row_0/A:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,556 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/77f42f0d39b74daea6ca4080351819ab is 50, key is test_row_0/B:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741905_1081 (size=13051) 2024-11-11T12:41:55,616 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/67dbab01b633428b9496322b66002bbe as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/67dbab01b633428b9496322b66002bbe 2024-11-11T12:41:55,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741906_1082 (size=13051) 2024-11-11T12:41:55,626 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 67dbab01b633428b9496322b66002bbe(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
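The compaction above (ExploringCompactionPolicy picking four roughly 12 K HFiles per store and rewriting them into one ~12.7 K file) was triggered automatically after the flush, but the same flush-then-compact cycle can also be requested through the public Admin API, which is what the FlushTableProcedure entries elsewhere in this log correspond to. A short sketch along those lines; the table name comes from the log, while the class name, the polling loop on getCompactionState, and the sleep interval are illustrative assumptions rather than anything this test does.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.CompactionState;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushAndCompactExample {
      public static void main(String[] args) throws Exception {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.flush(tn);    // asks the master to run a flush-table procedure
          admin.compact(tn);  // requests a (minor) compaction of the table's stores
          // Poll until the region servers report no compaction in progress.
          while (admin.getCompactionState(tn) != CompactionState.NONE) {
            Thread.sleep(200);
          }
        }
      }
    }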
2024-11-11T12:41:55,626 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:55,626 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=12, startTime=1731328915520; duration=0sec 2024-11-11T12:41:55,626 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:55,627 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:55,627 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:41:55,628 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49818 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:41:55,628 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:55,629 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:55,629 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ef821e9b4cca4d27974cda9a5e0d097f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=48.7 K 2024-11-11T12:41:55,629 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ef821e9b4cca4d27974cda9a5e0d097f, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1731328912213 2024-11-11T12:41:55,630 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5b75e23d78b4ef6945c41271415ea99, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=274, earliestPutTs=1731328912846 2024-11-11T12:41:55,630 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a01863d085e645be9dc02e3612c9bb4b, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1731328913548 2024-11-11T12:41:55,631 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e01a4ae3de54e56ba6bcd9dd41e25ec, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:55,662 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#68 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:55,664 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0f05e5378f994f598361b65d5d21c9a0 is 50, key is test_row_0/C:col10/1731328915349/Put/seqid=0 2024-11-11T12:41:55,684 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:55,684 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:55,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:55,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/13a11fab1cf64541890ac28720e61b32 is 50, key is test_row_0/A:col10/1731328915371/Put/seqid=0 2024-11-11T12:41:55,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741907_1083 (size=13051) 2024-11-11T12:41:55,721 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328975714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328975716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328975716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328975722, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328975723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741908_1084 (size=14741) 2024-11-11T12:41:55,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-11-11T12:41:55,809 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 20 completed 2024-11-11T12:41:55,814 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:41:55,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees 2024-11-11T12:41:55,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-11T12:41:55,817 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:41:55,818 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=22, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:41:55,818 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:41:55,832 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328975830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328975831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328975831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,839 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328975839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,839 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:55,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328975839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-11T12:41:55,972 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:55,973 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:55,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:55,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:55,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:55,973 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
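The repeated "Over memstore limit=512.0 K" rejections, and the FlushRegionCallable above declining to flush because the region is "already flushing", both stem from the region's memstore sitting at its blocking threshold while writers keep arriving. In a standard configuration that threshold is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, so a 512 K limit suggests this test deliberately runs with a very small flush size. Below is a hedged sketch of setting those two standard configuration keys for a small test setup; the concrete values are illustrative and are not read from this run.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static Configuration smallMemstoreConf() {
        Configuration conf = HBaseConfiguration.create();
        // Flush a store's memstore once it reaches 128 KB (illustrative; the default is 128 MB).
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
        // Block new updates (RegionTooBusyException) once the region's memstore reaches
        // multiplier * flush size, i.e. 4 * 128 KB = 512 KB with these illustrative values.
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
        return conf;
      }
    }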
2024-11-11T12:41:55,973 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:55,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,029 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/77f42f0d39b74daea6ca4080351819ab as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/77f42f0d39b74daea6ca4080351819ab
2024-11-11T12:41:56,035 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328976034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,036 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328976035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,039 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 77f42f0d39b74daea6ca4080351819ab(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T12:41:56,040 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72:
2024-11-11T12:41:56,040 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=12, startTime=1731328915520; duration=0sec
2024-11-11T12:41:56,040 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:41:56,040 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B
2024-11-11T12:41:56,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328976038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328976050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,052 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328976051, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,138 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-11T12:41:56,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-11T12:41:56,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing
2024-11-11T12:41:56,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,153 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/13a11fab1cf64541890ac28720e61b32
2024-11-11T12:41:56,154 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0f05e5378f994f598361b65d5d21c9a0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f05e5378f994f598361b65d5d21c9a0
2024-11-11T12:41:56,163 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 0f05e5378f994f598361b65d5d21c9a0(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T12:41:56,163 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72:
2024-11-11T12:41:56,163 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=12, startTime=1731328915520; duration=0sec
2024-11-11T12:41:56,165 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:41:56,165 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C
2024-11-11T12:41:56,173 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/538092bfd2e748d0a1ffcd80925ff26f is 50, key is test_row_0/B:col10/1731328915371/Put/seqid=0
2024-11-11T12:41:56,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741909_1085 (size=12301)
2024-11-11T12:41:56,200 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/538092bfd2e748d0a1ffcd80925ff26f
2024-11-11T12:41:56,213 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8bd87703cc59466ca6d2c259bdeaf9be is 50, key is test_row_0/C:col10/1731328915371/Put/seqid=0
2024-11-11T12:41:56,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741910_1086 (size=12301)
2024-11-11T12:41:56,255 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=338 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8bd87703cc59466ca6d2c259bdeaf9be
2024-11-11T12:41:56,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/13a11fab1cf64541890ac28720e61b32 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32
2024-11-11T12:41:56,280 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32, entries=200, sequenceid=338, filesize=14.4 K
2024-11-11T12:41:56,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/538092bfd2e748d0a1ffcd80925ff26f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f
2024-11-11T12:41:56,288 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f, entries=150, sequenceid=338, filesize=12.0 K
2024-11-11T12:41:56,289 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/8bd87703cc59466ca6d2c259bdeaf9be as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be
2024-11-11T12:41:56,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-11T12:41:56,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing
2024-11-11T12:41:56,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,294 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,303 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be, entries=150, sequenceid=338, filesize=12.0 K
2024-11-11T12:41:56,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 0a6a9f82df0ac9ece8343137343e2f72 in 621ms, sequenceid=338, compaction requested=false
2024-11-11T12:41:56,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72:
2024-11-11T12:41:56,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72
2024-11-11T12:41:56,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C
2024-11-11T12:41:56,342 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:56,352 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/da2ac016e0aa483aa55088658ba5e9af is 50, key is test_row_0/A:col10/1731328916339/Put/seqid=0
2024-11-11T12:41:56,380 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328976373, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,382 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328976377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328976378, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,383 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328976380, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328976382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741911_1087 (size=14741)
2024-11-11T12:41:56,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-11T12:41:56,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23
2024-11-11T12:41:56,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing
2024-11-11T12:41:56,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:41:56,448 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23
org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?]
    at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?]
    at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?]
    at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:41:56,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328976482, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,486 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328976484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,487 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328976484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,487 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:56,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328976484, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:56,488 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328976487, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,600 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,601 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:56,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:56,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,689 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328976687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,692 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328976689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328976690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,693 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328976690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,694 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328976691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,754 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,754 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:56,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:56,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,755 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/da2ac016e0aa483aa55088658ba5e9af 2024-11-11T12:41:56,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b5d772983ef14fafbba416a36288bd8b is 50, key is test_row_0/B:col10/1731328916339/Put/seqid=0 2024-11-11T12:41:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741912_1088 (size=12301) 2024-11-11T12:41:56,908 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,908 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
as already flushing 2024-11-11T12:41:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:56,909 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,909 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:56,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-11T12:41:56,994 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,994 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328976993, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328976994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,995 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328976994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328976997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:56,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:56,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328976997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,063 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,063 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,064 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:57,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:57,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:57,217 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,218 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:57,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,218 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:57,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] handler.RSProcedureHandler(58): pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:57,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=23 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:41:57,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=23 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
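The handler traces above repeatedly reject Mutate calls with RegionTooBusyException because the region's memstore is over its 512.0 K blocking limit while the flush for pid=23 is still pending; that blocking ceiling is normally hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier, presumably configured very small for this test. The following is an illustrative sketch only, not part of this log: a minimal client-side write loop, assuming the table, row, and family names seen in the log, that backs off and retries when the exception reaches the caller (with default client settings it may instead surface wrapped in the client's retries-exhausted exception).

// Illustrative sketch, not from this test run. Backs off while the server
// applies memstore backpressure, then retries once the flush frees space.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      while (true) {
        try {
          table.put(put);
          break;                        // write accepted
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);      // memstore over limit; wait for the flush
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
    }
  }
}

The backoff is deliberate: the RegionTooBusyException in these traces is the region server's way of shedding write load until the in-flight flush completes, so retrying immediately would only reproduce the same rejection.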
2024-11-11T12:41:57,221 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b5d772983ef14fafbba416a36288bd8b 2024-11-11T12:41:57,247 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1d85c460f9a84041b25af99ea5cc594b is 50, key is test_row_0/C:col10/1731328916339/Put/seqid=0 2024-11-11T12:41:57,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741913_1089 (size=12301) 2024-11-11T12:41:57,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=357 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1d85c460f9a84041b25af99ea5cc594b 2024-11-11T12:41:57,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/da2ac016e0aa483aa55088658ba5e9af as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af 2024-11-11T12:41:57,278 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af, entries=200, sequenceid=357, filesize=14.4 K 2024-11-11T12:41:57,279 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b5d772983ef14fafbba416a36288bd8b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b 2024-11-11T12:41:57,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b, entries=150, sequenceid=357, filesize=12.0 K 2024-11-11T12:41:57,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/1d85c460f9a84041b25af99ea5cc594b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b 2024-11-11T12:41:57,309 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b, entries=150, sequenceid=357, filesize=12.0 K 2024-11-11T12:41:57,310 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 0a6a9f82df0ac9ece8343137343e2f72 in 969ms, sequenceid=357, compaction requested=true 2024-11-11T12:41:57,311 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:57,311 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:57,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:57,311 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:57,311 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:57,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:57,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:57,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:57,312 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:57,313 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:57,313 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:57,313 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:57,313 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/77f42f0d39b74daea6ca4080351819ab, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.8 K 2024-11-11T12:41:57,313 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 42533 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:57,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:57,314 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,314 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/67dbab01b633428b9496322b66002bbe, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=41.5 K 2024-11-11T12:41:57,314 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 77f42f0d39b74daea6ca4080351819ab, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:57,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 67dbab01b633428b9496322b66002bbe, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:57,315 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 538092bfd2e748d0a1ffcd80925ff26f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1731328915371 2024-11-11T12:41:57,316 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13a11fab1cf64541890ac28720e61b32, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1731328915366 2024-11-11T12:41:57,316 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.Compactor(224): Compacting b5d772983ef14fafbba416a36288bd8b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:57,317 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting da2ac016e0aa483aa55088658ba5e9af, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:57,348 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#75 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:57,349 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/425c98e1c65148d293fd874eaf24fd8a is 50, key is test_row_0/B:col10/1731328916339/Put/seqid=0 2024-11-11T12:41:57,367 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#76 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:57,368 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/ea68954294ca4ba2a8a18e424d44b4b1 is 50, key is test_row_0/A:col10/1731328916339/Put/seqid=0 2024-11-11T12:41:57,371 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,372 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=23 2024-11-11T12:41:57,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:57,372 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-11T12:41:57,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:57,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:57,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741914_1090 (size=13153) 2024-11-11T12:41:57,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3d21d419015040eaa42c8f4dc3830b0f is 50, key is test_row_0/A:col10/1731328916372/Put/seqid=0 2024-11-11T12:41:57,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741915_1091 (size=13153) 2024-11-11T12:41:57,406 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/425c98e1c65148d293fd874eaf24fd8a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/425c98e1c65148d293fd874eaf24fd8a 2024-11-11T12:41:57,422 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 425c98e1c65148d293fd874eaf24fd8a(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:57,422 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:57,422 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328917311; duration=0sec 2024-11-11T12:41:57,422 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:57,422 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:57,422 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:57,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741916_1092 (size=12301) 2024-11-11T12:41:57,424 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3d21d419015040eaa42c8f4dc3830b0f 2024-11-11T12:41:57,426 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37653 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:57,426 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:57,426 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:57,426 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f05e5378f994f598361b65d5d21c9a0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.8 K 2024-11-11T12:41:57,429 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f05e5378f994f598361b65d5d21c9a0, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=316, earliestPutTs=1731328914220 2024-11-11T12:41:57,429 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8bd87703cc59466ca6d2c259bdeaf9be, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=338, earliestPutTs=1731328915371 2024-11-11T12:41:57,430 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d85c460f9a84041b25af99ea5cc594b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:57,437 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/32ac4c2b45084e6d978e48c97f1980df is 50, key is test_row_0/B:col10/1731328916372/Put/seqid=0 2024-11-11T12:41:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741917_1093 (size=12301) 2024-11-11T12:41:57,468 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/32ac4c2b45084e6d978e48c97f1980df 2024-11-11T12:41:57,474 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#79 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:57,475 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/765bba0ac80e43aea01f1a17101a28b3 is 50, key is test_row_0/C:col10/1731328916339/Put/seqid=0 2024-11-11T12:41:57,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/ea5c878be7694dba95371cdc619239c8 is 50, key is test_row_0/C:col10/1731328916372/Put/seqid=0 2024-11-11T12:41:57,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741918_1094 (size=13153) 2024-11-11T12:41:57,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:57,500 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:41:57,506 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/765bba0ac80e43aea01f1a17101a28b3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/765bba0ac80e43aea01f1a17101a28b3 2024-11-11T12:41:57,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741919_1095 (size=12301) 2024-11-11T12:41:57,516 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 765bba0ac80e43aea01f1a17101a28b3(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:41:57,516 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:57,516 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328917312; duration=0sec 2024-11-11T12:41:57,516 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:57,516 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:41:57,527 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328977522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328977522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,528 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328977522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328977524, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328977526, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328977629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328977629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328977630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328977632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328977632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328977833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328977833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,835 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328977833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,836 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/ea68954294ca4ba2a8a18e424d44b4b1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ea68954294ca4ba2a8a18e424d44b4b1 2024-11-11T12:41:57,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328977835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,843 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:57,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328977843, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:57,849 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into ea68954294ca4ba2a8a18e424d44b4b1(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:41:57,849 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:57,849 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328917311; duration=0sec 2024-11-11T12:41:57,849 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:57,849 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:57,909 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=377 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/ea5c878be7694dba95371cdc619239c8 2024-11-11T12:41:57,923 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/3d21d419015040eaa42c8f4dc3830b0f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f 2024-11-11T12:41:57,932 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f, entries=150, sequenceid=377, filesize=12.0 K 2024-11-11T12:41:57,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/32ac4c2b45084e6d978e48c97f1980df as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df 2024-11-11T12:41:57,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22 2024-11-11T12:41:57,949 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df, entries=150, sequenceid=377, filesize=12.0 K 2024-11-11T12:41:57,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/ea5c878be7694dba95371cdc619239c8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8 2024-11-11T12:41:57,957 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8, entries=150, sequenceid=377, filesize=12.0 K 2024-11-11T12:41:57,958 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, heapSize ~299.53 KB/306720, currentSize=87.22 KB/89310 for 0a6a9f82df0ac9ece8343137343e2f72 in 586ms, sequenceid=377, compaction requested=false 2024-11-11T12:41:57,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:57,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:57,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=23}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=23 2024-11-11T12:41:57,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=23 2024-11-11T12:41:57,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=23, resume processing ppid=22 2024-11-11T12:41:57,962 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, ppid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.1420 sec 2024-11-11T12:41:57,965 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=22, table=TestAcidGuarantees in 2.1480 sec 2024-11-11T12:41:58,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:58,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:58,143 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,151 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b1de8f17053e41b7920e727aad397c2d is 50, key is test_row_0/A:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,168 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328978164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,169 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328978164, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,170 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328978167, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328978168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,172 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328978168, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741920_1096 (size=14741) 2024-11-11T12:41:58,187 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b1de8f17053e41b7920e727aad397c2d 2024-11-11T12:41:58,198 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/636ecde85bde44489f00b79a03a0a41b is 50, key is test_row_0/B:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741921_1097 (size=12301) 2024-11-11T12:41:58,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328978270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328978270, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,272 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328978271, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,274 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328978273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,274 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328978274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,474 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328978474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328978474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,475 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328978474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,476 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328978475, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328978476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,603 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/636ecde85bde44489f00b79a03a0a41b 2024-11-11T12:41:58,613 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b6f0d91439e44a7e9959cc9d2a90698e is 50, key is test_row_0/C:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741922_1098 (size=12301) 2024-11-11T12:41:58,618 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=35.78 KB at sequenceid=399 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b6f0d91439e44a7e9959cc9d2a90698e 2024-11-11T12:41:58,624 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/b1de8f17053e41b7920e727aad397c2d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d 2024-11-11T12:41:58,630 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d, entries=200, sequenceid=399, filesize=14.4 K 2024-11-11T12:41:58,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/636ecde85bde44489f00b79a03a0a41b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b 2024-11-11T12:41:58,638 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b, entries=150, sequenceid=399, filesize=12.0 K 2024-11-11T12:41:58,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b6f0d91439e44a7e9959cc9d2a90698e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e 2024-11-11T12:41:58,646 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e, entries=150, sequenceid=399, filesize=12.0 K 2024-11-11T12:41:58,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~107.34 KB/109920, heapSize ~281.95 KB/288720, currentSize=100.63 KB/103050 for 0a6a9f82df0ac9ece8343137343e2f72 in 508ms, sequenceid=399, compaction requested=true 2024-11-11T12:41:58,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:58,650 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:58,650 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 
{}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:41:58,650 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:58,654 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:58,654 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:41:58,654 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:58,654 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/425c98e1c65148d293fd874eaf24fd8a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.9 K 2024-11-11T12:41:58,655 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40195 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:58,655 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:41:58,655 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:41:58,655 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 425c98e1c65148d293fd874eaf24fd8a, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:58,655 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ea68954294ca4ba2a8a18e424d44b4b1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=39.3 K 2024-11-11T12:41:58,655 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 32ac4c2b45084e6d978e48c97f1980df, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731328916372 2024-11-11T12:41:58,656 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ea68954294ca4ba2a8a18e424d44b4b1, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:58,656 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 636ecde85bde44489f00b79a03a0a41b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328918139 2024-11-11T12:41:58,657 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3d21d419015040eaa42c8f4dc3830b0f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731328916372 2024-11-11T12:41:58,657 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1de8f17053e41b7920e727aad397c2d, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328917519 2024-11-11T12:41:58,669 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#84 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:58,671 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3a77e60ddc0a43629c98c7d180cbdbb3 is 50, key is test_row_0/B:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,672 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#85 average throughput is 1.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:58,673 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/c16accc145144b8b8f89e4010b1affe0 is 50, key is test_row_0/A:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741923_1099 (size=13255) 2024-11-11T12:41:58,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741924_1100 (size=13255) 2024-11-11T12:41:58,689 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/3a77e60ddc0a43629c98c7d180cbdbb3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3a77e60ddc0a43629c98c7d180cbdbb3 2024-11-11T12:41:58,698 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/c16accc145144b8b8f89e4010b1affe0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/c16accc145144b8b8f89e4010b1affe0 2024-11-11T12:41:58,704 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 3a77e60ddc0a43629c98c7d180cbdbb3(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
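Note on the "Exploring compaction algorithm has selected 3 files ..." lines above: these come from HBase's ExploringCompactionPolicy, which walks contiguous runs of eligible store files and keeps only selections that are "in ratio", meaning no single file is larger than the configured ratio times the combined size of the other candidates in the selection. The sketch below is a simplified, hedged illustration of that check, not the HBase source itself; the method name is invented for illustration, and the ratio value is assumed to correspond to hbase.hstore.compaction.ratio (default 1.2).

// Simplified sketch (not HBase source code) of the "in ratio" test used when
// exploring candidate selections of store files for a minor compaction.
// Assumption: ratio plays the role of hbase.hstore.compaction.ratio (default 1.2).
static boolean filesInRatio(long[] fileSizes, double ratio) {
    long total = 0;
    for (long size : fileSizes) {
        total += size;
    }
    for (long size : fileSizes) {
        // A file may not be larger than ratio * (sum of the other files in the selection).
        if (size > ratio * (total - size)) {
            return false;
        }
    }
    return true;
}

For the selection logged above (roughly 12.8 K + 12.0 K + 12.0 K, 37755 bytes total), every file is well within 1.2x the sum of the other two, so all three files are picked up in one minor compaction.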
2024-11-11T12:41:58,704 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:58,704 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328918650; duration=0sec 2024-11-11T12:41:58,704 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:41:58,705 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:41:58,705 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:41:58,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:41:58,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:41:58,708 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:41:58,708 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/765bba0ac80e43aea01f1a17101a28b3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=36.9 K 2024-11-11T12:41:58,709 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into c16accc145144b8b8f89e4010b1affe0(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
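Note on the recurring "RegionTooBusyException: Over memstore limit=512.0 K" warnings earlier in this log (and again below): HRegion.checkResources() rejects writes once the region's combined memstore size exceeds its blocking threshold, which is the memstore flush size multiplied by the block multiplier. The snippet below is a hedged sketch of how such a small threshold could be configured for a test; the concrete values are assumptions inferred from the 512 K limit in the log (for example a 128 KB flush size with the default multiplier of 4), not values read from this test's actual configuration.

// Hedged sketch: a configuration that would yield a ~512 K blocking threshold.
// blocking threshold = hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier
// The concrete values below are illustrative assumptions, not taken from TestAcidGuarantees itself.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitExample {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // 128 KB flush size (assumed)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // default multiplier
        long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
                * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("Writes block once the region memstore exceeds " + blockingLimit + " bytes");
    }
}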
2024-11-11T12:41:58,709 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:41:58,709 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 765bba0ac80e43aea01f1a17101a28b3, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=357, earliestPutTs=1731328915693 2024-11-11T12:41:58,709 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328918649; duration=0sec 2024-11-11T12:41:58,709 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:41:58,709 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:41:58,709 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ea5c878be7694dba95371cdc619239c8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=377, earliestPutTs=1731328916372 2024-11-11T12:41:58,710 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b6f0d91439e44a7e9959cc9d2a90698e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328918139 2024-11-11T12:41:58,760 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#86 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:41:58,761 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/462b91d9c53e4f22b18b9b57ee92f9f2 is 50, key is test_row_0/C:col10/1731328918139/Put/seqid=0 2024-11-11T12:41:58,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:41:58,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=107.34 KB heapSize=282 KB 2024-11-11T12:41:58,783 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:41:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:41:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:41:58,784 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:41:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328978797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328978797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,802 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328978798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,803 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328978800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,804 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:41:58,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328978800, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:41:58,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741925_1101 (size=13255) 2024-11-11T12:41:58,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/db5d6a4a04b24fd58373f29b7558f2c7 is 50, key is test_row_0/A:col10/1731328918164/Put/seqid=0 2024-11-11T12:41:58,825 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/462b91d9c53e4f22b18b9b57ee92f9f2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/462b91d9c53e4f22b18b9b57ee92f9f2 2024-11-11T12:41:58,836 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 462b91d9c53e4f22b18b9b57ee92f9f2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
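Note on the RegionTooBusyException pushback seen throughout this section: it is a retriable server-side signal, and clients are expected to back off and retry once the flushes above have drained the memstore. The sketch below shows one hedged way a standalone client could handle it explicitly with the standard HBase client API; the table, row, family, and qualifier names are placeholders taken from the test rows in this log, the backoff numbers are arbitrary, and the stock HBase client already performs similar retries internally, so this is an illustration rather than required client code.

// Hedged sketch: explicit retry/backoff around a put that may hit RegionTooBusyException.
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RetryPutExample {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"))
                    .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            long backoffMs = 100;                 // arbitrary starting backoff
            for (int attempt = 0; attempt < 5; attempt++) {
                try {
                    table.put(put);
                    break;                        // write accepted
                } catch (RegionTooBusyException e) {
                    Thread.sleep(backoffMs);      // region memstore is over its blocking limit
                    backoffMs *= 2;               // exponential backoff before retrying
                }
            }
        }
    }
}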
2024-11-11T12:41:58,836 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72:
2024-11-11T12:41:58,836 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328918650; duration=0sec
2024-11-11T12:41:58,836 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:41:58,836 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C
2024-11-11T12:41:58,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741926_1102 (size=12301)
2024-11-11T12:41:58,903 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:58,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328978902, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:58,904 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328978903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:58,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328978904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:58,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328978905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:58,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:58,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328978905, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328979107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328979107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328979107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,113 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328979108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328979107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/db5d6a4a04b24fd58373f29b7558f2c7
2024-11-11T12:41:59,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/89360db6cc4e486e8f0d1094a5288e6e is 50, key is test_row_0/B:col10/1731328918164/Put/seqid=0
2024-11-11T12:41:59,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741927_1103 (size=12301)
2024-11-11T12:41:59,412 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328979412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328979414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328979415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,418 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328979417, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,418 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328979418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,678 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/89360db6cc4e486e8f0d1094a5288e6e
2024-11-11T12:41:59,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e21957cdc0834952bb63eec044a972e4 is 50, key is test_row_0/C:col10/1731328918164/Put/seqid=0
2024-11-11T12:41:59,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741928_1104 (size=12301)
2024-11-11T12:41:59,696 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=40.25 KB at sequenceid=422 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e21957cdc0834952bb63eec044a972e4
2024-11-11T12:41:59,701 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/db5d6a4a04b24fd58373f29b7558f2c7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7
2024-11-11T12:41:59,707 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7, entries=150, sequenceid=422, filesize=12.0 K
2024-11-11T12:41:59,708 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/89360db6cc4e486e8f0d1094a5288e6e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e
2024-11-11T12:41:59,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e, entries=150, sequenceid=422, filesize=12.0 K
2024-11-11T12:41:59,714 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e21957cdc0834952bb63eec044a972e4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4
2024-11-11T12:41:59,728 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4, entries=150, sequenceid=422, filesize=12.0 K
2024-11-11T12:41:59,729 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~120.76 KB/123660, heapSize ~317.11 KB/324720, currentSize=87.22 KB/89310 for 0a6a9f82df0ac9ece8343137343e2f72 in 948ms, sequenceid=422, compaction requested=false
2024-11-11T12:41:59,729 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72:
2024-11-11T12:41:59,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72
2024-11-11T12:41:59,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=93.93 KB heapSize=246.84 KB
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C
2024-11-11T12:41:59,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:41:59,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e02edd313cc049fabfd68b14c87de386 is 50, key is test_row_0/A:col10/1731328918798/Put/seqid=0
2024-11-11T12:41:59,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741929_1105 (size=12301)
2024-11-11T12:41:59,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328979938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,944 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328979941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=22
2024-11-11T12:41:59,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328979941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,945 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 22 completed
2024-11-11T12:41:59,946 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees
2024-11-11T12:41:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328979944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,947 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:41:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328979945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:41:59,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees
2024-11-11T12:41:59,948 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE
2024-11-11T12:41:59,949 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=24, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-11T12:41:59,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-11T12:41:59,949 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-11T12:42:00,047 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:00,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328980045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328980046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:00,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328980046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328980049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24
2024-11-11T12:42:00,050 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:00,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328980049, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232
2024-11-11T12:42:00,102 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25
2024-11-11T12:42:00,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:42:00,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing
2024-11-11T12:42:00,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.
2024-11-11T12:42:00,103 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25
java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}
    at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?]
    at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:42:00,103 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328980249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-11T12:42:00,251 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328980251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328980251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328980252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,254 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328980253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,256 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,260 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:00,260 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,260 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:00,261 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e02edd313cc049fabfd68b14c87de386 2024-11-11T12:42:00,356 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/7ac996e5a47140bf92e3edf5cf298e8d is 50, key is test_row_0/B:col10/1731328918798/Put/seqid=0 2024-11-11T12:42:00,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741930_1106 (size=12301) 2024-11-11T12:42:00,417 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,418 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,418 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:00,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-11T12:42:00,553 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328980552, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,554 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328980553, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,556 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328980555, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,557 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328980556, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,559 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:00,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328980557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,570 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,571 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,571 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:00,571 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,726 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,730 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:00,730 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:00,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,731 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,731 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:00,767 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/7ac996e5a47140bf92e3edf5cf298e8d 2024-11-11T12:42:00,777 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b46ecb17df784b1b966eb5fcd728100d is 50, key is test_row_0/C:col10/1731328918798/Put/seqid=0 2024-11-11T12:42:00,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741931_1107 (size=12301) 2024-11-11T12:42:00,884 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:00,884 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:00,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:00,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:00,885 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:00,885 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:00,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,042 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:01,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:01,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,043 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-11T12:42:01,057 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:01,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328981056, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328981058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:01,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328981059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,060 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:01,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328981059, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,064 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:01,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328981062, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,188 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=440 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b46ecb17df784b1b966eb5fcd728100d 2024-11-11T12:42:01,196 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:01,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:01,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,197 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] handler.RSProcedureHandler(58): pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,197 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=25 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,197 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/e02edd313cc049fabfd68b14c87de386 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386 2024-11-11T12:42:01,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=25 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:01,205 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386, entries=150, sequenceid=440, filesize=12.0 K 2024-11-11T12:42:01,207 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/7ac996e5a47140bf92e3edf5cf298e8d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d 2024-11-11T12:42:01,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d, entries=150, sequenceid=440, filesize=12.0 K 2024-11-11T12:42:01,218 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/b46ecb17df784b1b966eb5fcd728100d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d 2024-11-11T12:42:01,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d, entries=150, sequenceid=440, filesize=12.0 K 
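
The RegionTooBusyException warnings a little earlier in this stretch ("Over memstore limit=512.0 K") show writers being rejected while the region's memstore sits above its blocking limit and the flush is still in flight. Purely as an illustration (this sketch is not part of the test or of the log), a client writing through the standard HBase 2.x API could back off and retry on that exception; the table name is taken from the log, while the row, family, qualifier, retry counts and backoff values are invented for the example. Note that with default settings the client's own retry layer usually absorbs this exception, so the explicit catch below assumes client retries have been turned down.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Keep client-side retries minimal so RegionTooBusyException reaches this code
    // un-wrapped (assumption for the sketch; default retries would normally hide it).
    conf.setInt("hbase.client.retries.number", 1);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));            // row name borrowed from the log
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"),  // family/qualifier assumed
                    Bytes.toBytes("value"));
      long backoffMs = 100;                                      // arbitrary starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                                        // normal write path
          break;                                                 // success
        } catch (RegionTooBusyException e) {
          // Region is above its memstore blocking limit; give the flush time to catch up.
          if (attempt >= 5) {
            throw e;                                             // give up after a few tries
          }
          Thread.sleep(backoffMs);
          backoffMs *= 2;                                        // simple exponential backoff
        }
      }
    }
  }
}
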
2024-11-11T12:42:01,237 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=114.05 KB/116790 for 0a6a9f82df0ac9ece8343137343e2f72 in 1320ms, sequenceid=440, compaction requested=true 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:01,237 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:01,237 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:01,237 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:01,239 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:01,239 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:42:01,240 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
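
After this flush each of the three stores (A, B, C) holds three HFiles, and the flusher immediately queues minor compactions; the policy line "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" reflects the store-file thresholds in effect. As a rough sketch only (the test's actual configuration is not visible in the log), those thresholds are normally driven by the configuration keys shown below; the values merely mirror what the log messages suggest.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThresholdSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is considered;
    // "3 eligible" in the log is consistent with the usual default of 3.
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files selected for a single minor compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Writes to a store with this many files are blocked until compaction catches up;
    // "16 blocking" in the log matches the HBase 2.x default.
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    System.out.println("compaction.min = " + conf.getInt("hbase.hstore.compaction.min", -1));
  }
}
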
2024-11-11T12:42:01,240 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3a77e60ddc0a43629c98c7d180cbdbb3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.0 K 2024-11-11T12:42:01,240 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:01,241 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:42:01,241 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,241 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/c16accc145144b8b8f89e4010b1affe0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.0 K 2024-11-11T12:42:01,242 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3a77e60ddc0a43629c98c7d180cbdbb3, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328918139 2024-11-11T12:42:01,243 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c16accc145144b8b8f89e4010b1affe0, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328918139 2024-11-11T12:42:01,243 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 89360db6cc4e486e8f0d1094a5288e6e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1731328918164 2024-11-11T12:42:01,243 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting db5d6a4a04b24fd58373f29b7558f2c7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1731328918164 2024-11-11T12:42:01,244 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 7ac996e5a47140bf92e3edf5cf298e8d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:01,244 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e02edd313cc049fabfd68b14c87de386, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:01,267 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#93 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:01,268 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/084f5f8b68c1430db71aafff10759dfa is 50, key is test_row_0/B:col10/1731328918798/Put/seqid=0 2024-11-11T12:42:01,282 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#94 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:01,283 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/8df0c1838be94cff92d110d90851c4cf is 50, key is test_row_0/A:col10/1731328918798/Put/seqid=0 2024-11-11T12:42:01,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741933_1109 (size=13357) 2024-11-11T12:42:01,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741932_1108 (size=13357) 2024-11-11T12:42:01,343 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/8df0c1838be94cff92d110d90851c4cf as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8df0c1838be94cff92d110d90851c4cf 2024-11-11T12:42:01,349 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:01,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=25 2024-11-11T12:42:01,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:01,350 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=114.05 KB heapSize=299.58 KB 2024-11-11T12:42:01,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:01,351 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:01,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/1346ae30a1a842b99353f291551c0316 is 50, key is test_row_0/A:col10/1731328919942/Put/seqid=0 2024-11-11T12:42:01,364 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 8df0c1838be94cff92d110d90851c4cf(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
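
The CompactingMemStore / CompactionPipeline messages in the flush just above ("FLUSHING TO DISK ... store=A", "Swapping pipeline suffix") indicate that the table's families use HBase 2.x in-memory compaction rather than the default memstore. A minimal sketch of declaring a table that way follows; the descriptor-builder API is standard, but the table and family names and the BASIC policy are assumptions for illustration, since the log does not show how TestAcidGuarantees actually builds its schema.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class InMemoryCompactionSketch {
  public static void main(String[] args) {
    // Build a descriptor whose families use an in-memory compacting memstore.
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {   // family names taken from the log
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              // BASIC keeps in-memory segments in a pipeline and flushes them together,
              // which is where the "Swapping pipeline suffix" messages above come from.
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    TableDescriptor descriptor = table.build();
    System.out.println(descriptor);
    // Admin#createTable(descriptor) would then create the table (omitted here).
  }
}
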
2024-11-11T12:42:01,364 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:01,364 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328921237; duration=0sec 2024-11-11T12:42:01,364 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:01,364 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:42:01,364 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:01,366 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37857 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:01,366 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:42:01,366 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,366 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/462b91d9c53e4f22b18b9b57ee92f9f2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.0 K 2024-11-11T12:42:01,367 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 462b91d9c53e4f22b18b9b57ee92f9f2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=399, earliestPutTs=1731328918139 2024-11-11T12:42:01,367 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e21957cdc0834952bb63eec044a972e4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=422, earliestPutTs=1731328918164 2024-11-11T12:42:01,368 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b46ecb17df784b1b966eb5fcd728100d, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:01,386 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#96 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:01,387 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/08c23f048eca4cd69f22f42118c48a49 is 50, key is test_row_0/C:col10/1731328918798/Put/seqid=0 2024-11-11T12:42:01,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741934_1110 (size=12301) 2024-11-11T12:42:01,407 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/1346ae30a1a842b99353f291551c0316 2024-11-11T12:42:01,438 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/d7f5eb4777ca4037a848d32e8844d74a is 50, key is test_row_0/B:col10/1731328919942/Put/seqid=0 2024-11-11T12:42:01,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741935_1111 (size=13357) 2024-11-11T12:42:01,462 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/08c23f048eca4cd69f22f42118c48a49 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/08c23f048eca4cd69f22f42118c48a49 2024-11-11T12:42:01,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741936_1112 (size=12301) 2024-11-11T12:42:01,485 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 08c23f048eca4cd69f22f42118c48a49(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
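
At this point stores A and C have each been compacted from three ~12 K files into a single ~13 K file (B completes shortly after). These compactions were queued by the flusher itself; as a purely illustrative aside, a client can also request a compaction and poll its progress through the Admin API, roughly as sketched below (table name from the log, everything else assumed).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.CompactionState;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.majorCompact(table);                        // asynchronous compaction request
      // Poll until the servers report no compaction running for the table.
      while (admin.getCompactionState(table) != CompactionState.NONE) {
        Thread.sleep(500);
      }
      System.out.println("compaction finished for " + table);
    }
  }
}
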
2024-11-11T12:42:01,485 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:01,485 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328921237; duration=0sec 2024-11-11T12:42:01,485 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:01,485 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:42:01,743 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/084f5f8b68c1430db71aafff10759dfa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/084f5f8b68c1430db71aafff10759dfa 2024-11-11T12:42:01,756 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 084f5f8b68c1430db71aafff10759dfa(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:01,756 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:01,756 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328921237; duration=0sec 2024-11-11T12:42:01,757 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:01,757 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:42:01,878 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/d7f5eb4777ca4037a848d32e8844d74a 2024-11-11T12:42:01,888 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/422ec05252644b9c927ac4ff9cef2f9a is 50, key is test_row_0/C:col10/1731328919942/Put/seqid=0 2024-11-11T12:42:01,916 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741937_1113 (size=12301) 2024-11-11T12:42:01,918 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=38.02 KB at sequenceid=460 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/422ec05252644b9c927ac4ff9cef2f9a 2024-11-11T12:42:01,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/1346ae30a1a842b99353f291551c0316 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316 2024-11-11T12:42:01,933 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316, entries=150, sequenceid=460, filesize=12.0 K 2024-11-11T12:42:01,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/d7f5eb4777ca4037a848d32e8844d74a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a 2024-11-11T12:42:01,941 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a, entries=150, sequenceid=460, filesize=12.0 K 2024-11-11T12:42:01,942 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/422ec05252644b9c927ac4ff9cef2f9a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a 2024-11-11T12:42:01,956 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a, entries=150, sequenceid=460, filesize=12.0 K 2024-11-11T12:42:01,957 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(3040): Finished flush of dataSize ~114.05 KB/116790, 
heapSize ~299.53 KB/306720, currentSize=0 B/0 for 0a6a9f82df0ac9ece8343137343e2f72 in 607ms, sequenceid=460, compaction requested=false 2024-11-11T12:42:01,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:01,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:01,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=25}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=25 2024-11-11T12:42:01,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=25 2024-11-11T12:42:01,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-11-11T12:42:01,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0100 sec 2024-11-11T12:42:01,964 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=24, table=TestAcidGuarantees in 2.0160 sec 2024-11-11T12:42:02,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=24 2024-11-11T12:42:02,054 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed 2024-11-11T12:42:02,055 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:02,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees 2024-11-11T12:42:02,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-11T12:42:02,057 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:02,058 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=26, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:02,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:02,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:02,073 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 
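
The "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 24 completed" line above is the client-side acknowledgement of the flush the test requested through HBaseAdmin, and a new flush (pid=26) is queued immediately afterwards. For orientation only, the request side of that exchange looks roughly like the sketch below in the standard 2.x client API; the table name comes from the log and the rest is assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Ask the master to flush every region of the table; the master runs a
      // FlushTableProcedure with one FlushRegionProcedure subprocedure per region,
      // which is the pid=24 / pid=25 pair visible in the log above.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
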
2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:02,076 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,083 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f74ef7fc4d6642e4bc38ab63cb3d392e is 50, key is test_row_0/A:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741938_1114 (size=12301) 2024-11-11T12:42:02,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982121, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,131 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 201 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982123, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 213 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328982125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,148 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 219 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328982144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-11T12:42:02,210 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,211 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-11T12:42:02,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:02,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,213 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,213 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,231 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 210 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,236 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 215 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328982234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 221 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328982250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-11T12:42:02,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,379 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-11T12:42:02,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:02,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:02,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,434 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 212 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982433, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,438 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328982437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 205 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 217 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982437, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,454 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328982453, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,501 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f74ef7fc4d6642e4bc38ab63cb3d392e 2024-11-11T12:42:02,518 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ae86e81b69ad4b2080e602523a641948 is 50, key is test_row_0/B:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741939_1115 (size=12301) 2024-11-11T12:42:02,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ae86e81b69ad4b2080e602523a641948 2024-11-11T12:42:02,532 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,533 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-11T12:42:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:02,533 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] handler.RSProcedureHandler(58): pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,533 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=27 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=27 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:02,537 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0db273232e444c4981a3118b1bc14742 is 50, key is test_row_0/C:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741940_1116 (size=12301) 2024-11-11T12:42:02,544 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=476 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0db273232e444c4981a3118b1bc14742 2024-11-11T12:42:02,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/f74ef7fc4d6642e4bc38ab63cb3d392e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e 2024-11-11T12:42:02,553 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e, entries=150, sequenceid=476, filesize=12.0 K 2024-11-11T12:42:02,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ae86e81b69ad4b2080e602523a641948 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948 2024-11-11T12:42:02,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948, entries=150, sequenceid=476, filesize=12.0 K 2024-11-11T12:42:02,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/0db273232e444c4981a3118b1bc14742 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742 2024-11-11T12:42:02,571 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742, entries=150, sequenceid=476, filesize=12.0 K 2024-11-11T12:42:02,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=147.60 KB/151140 for 0a6a9f82df0ac9ece8343137343e2f72 in 498ms, sequenceid=476, compaction requested=true 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:02,572 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:02,572 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:02,572 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:02,573 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:02,574 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:42:02,574 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:02,574 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8df0c1838be94cff92d110d90851c4cf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.1 K 2024-11-11T12:42:02,574 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:02,574 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:42:02,574 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:02,574 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/084f5f8b68c1430db71aafff10759dfa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.1 K 2024-11-11T12:42:02,575 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8df0c1838be94cff92d110d90851c4cf, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:02,575 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 084f5f8b68c1430db71aafff10759dfa, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:02,575 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d7f5eb4777ca4037a848d32e8844d74a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1731328919936 2024-11-11T12:42:02,575 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1346ae30a1a842b99353f291551c0316, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1731328919936 2024-11-11T12:42:02,576 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f74ef7fc4d6642e4bc38ab63cb3d392e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:02,576 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ae86e81b69ad4b2080e602523a641948, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:02,600 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#102 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:02,600 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4a313b2c6f5a44b1ac1ff69053e1dcc6 is 50, key is test_row_0/B:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,601 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#103 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:02,602 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/53f409bcf6294ea08c36f192a845d082 is 50, key is test_row_0/A:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741941_1117 (size=13459) 2024-11-11T12:42:02,614 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/4a313b2c6f5a44b1ac1ff69053e1dcc6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4a313b2c6f5a44b1ac1ff69053e1dcc6 2024-11-11T12:42:02,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741942_1118 (size=13459) 2024-11-11T12:42:02,629 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 4a313b2c6f5a44b1ac1ff69053e1dcc6(size=13.1 K), total size for store is 13.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:02,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:02,630 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328922572; duration=0sec 2024-11-11T12:42:02,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:02,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:42:02,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:02,632 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37959 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:02,632 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:42:02,632 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:02,632 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/08c23f048eca4cd69f22f42118c48a49, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.1 K 2024-11-11T12:42:02,632 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 08c23f048eca4cd69f22f42118c48a49, keycount=150, bloomtype=ROW, size=13.0 K, encoding=NONE, compression=NONE, seqNum=440, earliestPutTs=1731328918794 2024-11-11T12:42:02,633 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 422ec05252644b9c927ac4ff9cef2f9a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=460, earliestPutTs=1731328919936 2024-11-11T12:42:02,634 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0db273232e444c4981a3118b1bc14742, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:02,644 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#104 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:02,645 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/efd83bab1b4b454d97da89daea7841a9 is 50, key is test_row_0/C:col10/1731328922072/Put/seqid=0 2024-11-11T12:42:02,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741943_1119 (size=13459) 2024-11-11T12:42:02,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-11T12:42:02,686 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=27 2024-11-11T12:42:02,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:02,687 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:02,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/699601e8e80c42f7bb24030b2b40ef16 is 50, key is test_row_0/A:col10/1731328922123/Put/seqid=0 2024-11-11T12:42:02,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741944_1120 (size=12301) 2024-11-11T12:42:02,742 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/699601e8e80c42f7bb24030b2b40ef16 2024-11-11T12:42:02,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:02,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:02,761 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328982757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,762 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/843b1b55c3cb4dd7a014d16888402ee9 is 50, key is test_row_0/B:col10/1731328922123/Put/seqid=0 2024-11-11T12:42:02,763 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 223 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,764 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 209 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,766 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328982764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741945_1121 (size=12301) 2024-11-11T12:42:02,777 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/843b1b55c3cb4dd7a014d16888402ee9 2024-11-11T12:42:02,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/f0a622c967d04b2cbb7d82b8131ae323 is 50, key is test_row_0/C:col10/1731328922123/Put/seqid=0 2024-11-11T12:42:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741946_1122 (size=12301) 2024-11-11T12:42:02,802 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=502 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/f0a622c967d04b2cbb7d82b8131ae323 2024-11-11T12:42:02,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/699601e8e80c42f7bb24030b2b40ef16 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16 2024-11-11T12:42:02,818 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16, entries=150, sequenceid=502, filesize=12.0 K 2024-11-11T12:42:02,820 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/843b1b55c3cb4dd7a014d16888402ee9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9 2024-11-11T12:42:02,828 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9, entries=150, sequenceid=502, filesize=12.0 K 2024-11-11T12:42:02,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/f0a622c967d04b2cbb7d82b8131ae323 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323 2024-11-11T12:42:02,840 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323, entries=150, sequenceid=502, filesize=12.0 K 2024-11-11T12:42:02,842 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for 0a6a9f82df0ac9ece8343137343e2f72 in 154ms, sequenceid=502, compaction requested=false 2024-11-11T12:42:02,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:02,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
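The flush above produces one HFile per store (A, B and C), and the CompactingMemStore / pipeline-swap messages indicate the families use an in-memory compacting memstore. A hedged sketch of declaring a comparable table with the HBase 2.x descriptor builders follows; the BASIC policy is an assumption for illustration, not read from the test's actual schema.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: a table with families A/B/C backed by an in-memory compacting memstore,
// which is what produces the CompactingMemStore pipeline messages above.
public class CreateTableSketch {
  static void createTable(Connection conn) throws Exception {
    TableDescriptorBuilder builder =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"));
    for (String family : new String[] {"A", "B", "C"}) {
      builder.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
              // Assumed policy for illustration; the test's real schema may differ.
              .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
              .build());
    }
    TableDescriptor desc = builder.build();
    try (Admin admin = conn.getAdmin()) {
      admin.createTable(desc);
    }
  }
}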
2024-11-11T12:42:02,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=27}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=27 2024-11-11T12:42:02,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=27 2024-11-11T12:42:02,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-11-11T12:42:02,856 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 785 msec 2024-11-11T12:42:02,859 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=26, table=TestAcidGuarantees in 802 msec 2024-11-11T12:42:02,867 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:02,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:02,868 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:02,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:02,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:02,869 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:02,878 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/d6c9fdf21ad94846a22f15a22d641fa9 is 50, key is test_row_0/A:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:02,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741947_1123 (size=14741) 2024-11-11T12:42:02,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 216 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,897 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,898 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328982897, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:02,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 218 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328982998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:02,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 224 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328982998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328982999, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 230 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328983002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,029 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/53f409bcf6294ea08c36f192a845d082 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/53f409bcf6294ea08c36f192a845d082 2024-11-11T12:42:03,046 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 53f409bcf6294ea08c36f192a845d082(size=13.1 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
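The repeated RegionTooBusyException warnings in this stretch are the region server rejecting puts while the region's memstore is over its blocking limit; callers are expected to back off and retry once the in-flight flush drains it. The sketch below shows such a retry loop. Note that the stock HBase client already retries internally and may surface the failure wrapped in a retries-exhausted exception rather than as a bare RegionTooBusyException, so the explicit loop and the backoff values are purely illustrative assumptions.

import java.io.IOException;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: retry a put with exponential backoff when the region reports it is
// too busy (memstore over its blocking limit, as in the warnings above).
public class BusyRegionRetrySketch {
  static void putWithRetry(Connection conn, Put put) throws IOException, InterruptedException {
    try (Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      long backoffMs = 100; // assumed starting backoff
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          return;
        } catch (RegionTooBusyException e) {
          Thread.sleep(backoffMs);
          backoffMs *= 2; // give the pending flush time to drain the memstore
        }
      }
      throw new IOException("region still too busy after retries");
    }
  }

  static Put exampleRow() {
    return new Put(Bytes.toBytes("test_row_0"))
        .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
  }
}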
2024-11-11T12:42:03,046 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:03,046 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328922572; duration=0sec 2024-11-11T12:42:03,047 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:03,047 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:42:03,064 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/efd83bab1b4b454d97da89daea7841a9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/efd83bab1b4b454d97da89daea7841a9 2024-11-11T12:42:03,072 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into efd83bab1b4b454d97da89daea7841a9(size=13.1 K), total size for store is 25.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:03,072 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:03,073 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328922572; duration=0sec 2024-11-11T12:42:03,073 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:03,073 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:42:03,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=26 2024-11-11T12:42:03,164 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 26 completed 2024-11-11T12:42:03,174 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:03,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees 2024-11-11T12:42:03,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=28 2024-11-11T12:42:03,177 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:03,179 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=28, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:03,179 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:03,203 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,203 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 220 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328983201, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 226 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328983202, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,206 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 232 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328983205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328983205, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328983265, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-11T12:42:03,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/d6c9fdf21ad94846a22f15a22d641fa9 2024-11-11T12:42:03,297 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b132f333daba4fa096bd8bc1897d97b9 is 50, key is test_row_0/B:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:03,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741948_1124 (size=12301) 2024-11-11T12:42:03,303 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b132f333daba4fa096bd8bc1897d97b9 2024-11-11T12:42:03,311 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/592feb8164814fa1ad9141813f928afa is 50, key is test_row_0/C:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:03,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741949_1125 (size=12301) 2024-11-11T12:42:03,334 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,335 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-11T12:42:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
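Procedures pid=26 and pid=28 above were started by a client flushing the whole table (Thread-159 logs "Operation: FLUSH ... procId: 26 completed", after which a second request becomes pid=28). The Admin API call behind such a request is sketched below; it returns once the flush procedure finishes, which is why the master keeps logging "Checking to see if procedure is done". The "already flushing" failures that follow are the per-region subprocedure (pid=29) being retried while the memstore flusher is still busy.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;

// Sketch: the client-side call behind the FlushTableProcedure entries above.
public class FlushTableSketch {
  static void flushTable(Connection conn) throws Exception {
    try (Admin admin = conn.getAdmin()) {
      // Blocks until the flush procedure for every region of the table completes.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}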
2024-11-11T12:42:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:03,335 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,335 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:03,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-11T12:42:03,487 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-11T12:42:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:03,488 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,488 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:03,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,507 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 222 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328983506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,507 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 228 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328983506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 234 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328983511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,513 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:03,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328983512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,640 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,641 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-11T12:42:03,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:03,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:03,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:03,641 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] handler.RSProcedureHandler(58): pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:03,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=29 java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=29 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:03,718 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=516 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/592feb8164814fa1ad9141813f928afa 2024-11-11T12:42:03,726 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/d6c9fdf21ad94846a22f15a22d641fa9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9 2024-11-11T12:42:03,736 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9, entries=200, sequenceid=516, filesize=14.4 K 2024-11-11T12:42:03,738 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b132f333daba4fa096bd8bc1897d97b9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9 2024-11-11T12:42:03,744 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9, entries=150, sequenceid=516, filesize=12.0 K 2024-11-11T12:42:03,747 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/592feb8164814fa1ad9141813f928afa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa 2024-11-11T12:42:03,756 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa, entries=150, sequenceid=516, filesize=12.0 K 2024-11-11T12:42:03,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for 0a6a9f82df0ac9ece8343137343e2f72 in 891ms, sequenceid=516, compaction requested=true 2024-11-11T12:42:03,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 
0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:03,759 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:03,759 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:03,759 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:03,761 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40501 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:03,761 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:42:03,761 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:03,761 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/53f409bcf6294ea08c36f192a845d082, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=39.6 K 2024-11-11T12:42:03,761 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:03,761 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:42:03,761 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:03,762 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4a313b2c6f5a44b1ac1ff69053e1dcc6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.2 K 2024-11-11T12:42:03,762 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53f409bcf6294ea08c36f192a845d082, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:03,763 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4a313b2c6f5a44b1ac1ff69053e1dcc6, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:03,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 699601e8e80c42f7bb24030b2b40ef16, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1731328922118 2024-11-11T12:42:03,764 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 843b1b55c3cb4dd7a014d16888402ee9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1731328922118 2024-11-11T12:42:03,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting d6c9fdf21ad94846a22f15a22d641fa9, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922749 2024-11-11T12:42:03,764 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b132f333daba4fa096bd8bc1897d97b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922760 2024-11-11T12:42:03,779 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-11T12:42:03,779 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#111 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:03,780 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/9c15eb3169b84a2c9c2258eb1d6444db is 50, key is test_row_0/A:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:03,783 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#112 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:03,784 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/793e2916b13642eb8d8b297f72dabb2f is 50, key is test_row_0/B:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:03,793 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:03,797 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=29 2024-11-11T12:42:03,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741950_1126 (size=13561) 2024-11-11T12:42:03,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:03,803 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:03,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:03,815 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/9c15eb3169b84a2c9c2258eb1d6444db as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/9c15eb3169b84a2c9c2258eb1d6444db 2024-11-11T12:42:03,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741951_1127 (size=13561) 2024-11-11T12:42:03,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/ff15b7d8ccf64852bee3599df5a42ec4 is 50, key is test_row_0/A:col10/1731328922895/Put/seqid=0 2024-11-11T12:42:03,828 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 9c15eb3169b84a2c9c2258eb1d6444db(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:03,828 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:03,828 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328923759; duration=0sec 2024-11-11T12:42:03,828 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:03,828 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:42:03,828 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:03,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741952_1128 (size=12301) 2024-11-11T12:42:03,836 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/ff15b7d8ccf64852bee3599df5a42ec4 2024-11-11T12:42:03,837 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38061 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:03,837 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:42:03,837 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:03,843 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/efd83bab1b4b454d97da89daea7841a9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.2 K 2024-11-11T12:42:03,844 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting efd83bab1b4b454d97da89daea7841a9, keycount=150, bloomtype=ROW, size=13.1 K, encoding=NONE, compression=NONE, seqNum=476, earliestPutTs=1731328922067 2024-11-11T12:42:03,847 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0a622c967d04b2cbb7d82b8131ae323, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=502, earliestPutTs=1731328922118 2024-11-11T12:42:03,847 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 592feb8164814fa1ad9141813f928afa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922760 2024-11-11T12:42:03,858 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/9feb3ea8a5ba4a6e8597b307bc2851fd is 50, key is test_row_0/B:col10/1731328922895/Put/seqid=0 2024-11-11T12:42:03,862 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#C#compaction#115 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:03,862 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/f8f3a34226d74c5383f6be4308ab376c is 50, key is test_row_0/C:col10/1731328922867/Put/seqid=0 2024-11-11T12:42:03,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741953_1129 (size=12301) 2024-11-11T12:42:03,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741954_1130 (size=13561) 2024-11-11T12:42:03,923 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/f8f3a34226d74c5383f6be4308ab376c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f8f3a34226d74c5383f6be4308ab376c 2024-11-11T12:42:03,930 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into f8f3a34226d74c5383f6be4308ab376c(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:03,930 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:03,930 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328923759; duration=0sec 2024-11-11T12:42:03,930 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:03,931 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:42:04,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:04,013 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. as already flushing 2024-11-11T12:42:04,023 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328984021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 240 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328984022, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,024 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328984023, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,028 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328984024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,125 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,125 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328984125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 231 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328984125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 242 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328984128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,129 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328984129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,195 DEBUG [Thread-164 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x353bc462 to 127.0.0.1:54294 2024-11-11T12:42:04,195 DEBUG [Thread-166 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x47fe2fa7 to 127.0.0.1:54294 2024-11-11T12:42:04,195 DEBUG [Thread-160 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09ed28bb to 127.0.0.1:54294 2024-11-11T12:42:04,195 DEBUG [Thread-166 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:04,195 DEBUG [Thread-160 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:04,195 DEBUG [Thread-164 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:04,196 DEBUG [Thread-162 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x12a1285d to 127.0.0.1:54294 2024-11-11T12:42:04,196 DEBUG [Thread-162 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:04,223 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/793e2916b13642eb8d8b297f72dabb2f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/793e2916b13642eb8d8b297f72dabb2f 2024-11-11T12:42:04,233 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into 793e2916b13642eb8d8b297f72dabb2f(size=13.2 K), total size for store is 13.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:04,233 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:04,233 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328923759; duration=0sec 2024-11-11T12:42:04,233 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:04,234 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:42:04,269 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40952 deadline: 1731328984269, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,277 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/9feb3ea8a5ba4a6e8597b307bc2851fd 2024-11-11T12:42:04,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-11T12:42:04,286 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e8239b09e8b74e34ade2924ea31d0053 is 50, key is test_row_0/C:col10/1731328922895/Put/seqid=0 2024-11-11T12:42:04,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741955_1131 (size=12301) 2024-11-11T12:42:04,327 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 233 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328984327, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328984328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,332 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328984332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,332 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 244 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328984332, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40976 deadline: 1731328984630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 243 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40950 deadline: 1731328984633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,633 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 246 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40940 deadline: 1731328984633, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,635 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:04,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 241 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40992 deadline: 1731328984635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:04,694 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=540 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e8239b09e8b74e34ade2924ea31d0053 2024-11-11T12:42:04,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/ff15b7d8ccf64852bee3599df5a42ec4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4 2024-11-11T12:42:04,703 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4, entries=150, sequenceid=540, filesize=12.0 K 2024-11-11T12:42:04,704 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/9feb3ea8a5ba4a6e8597b307bc2851fd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd 2024-11-11T12:42:04,707 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd, entries=150, sequenceid=540, filesize=12.0 K 2024-11-11T12:42:04,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/e8239b09e8b74e34ade2924ea31d0053 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053 2024-11-11T12:42:04,711 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053, entries=150, sequenceid=540, filesize=12.0 K 2024-11-11T12:42:04,712 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for 0a6a9f82df0ac9ece8343137343e2f72 in 909ms, sequenceid=540, compaction requested=false 2024-11-11T12:42:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:04,712 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-11T12:42:04,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=29 2024-11-11T12:42:04,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=29, resume processing ppid=28 2024-11-11T12:42:04,715 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, ppid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5350 sec 2024-11-11T12:42:04,716 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=28, table=TestAcidGuarantees in 1.5410 sec 2024-11-11T12:42:04,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T12:42:05,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:05,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-11T12:42:05,135 DEBUG [Thread-157 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0e52b42a to 127.0.0.1:54294 2024-11-11T12:42:05,135 DEBUG [Thread-157 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:05,136 DEBUG [Thread-149 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1f2091cc to 127.0.0.1:54294 2024-11-11T12:42:05,136 DEBUG [Thread-149 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:05,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:05,139 DEBUG [Thread-151 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09bd0964 to 127.0.0.1:54294 2024-11-11T12:42:05,139 DEBUG [Thread-151 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:05,141 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/0a81e8c2500342039418b97c61122456 is 50, key is test_row_0/A:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:05,142 DEBUG [Thread-155 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x45b55c24 to 127.0.0.1:54294 2024-11-11T12:42:05,142 DEBUG [Thread-155 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:05,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741956_1132 (size=12301) 2024-11-11T12:42:05,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=28 2024-11-11T12:42:05,281 INFO [Thread-159 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 28 completed 2024-11-11T12:42:05,335 ERROR [LeaseRenewer:jenkins@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:42421,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:05,546 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/0a81e8c2500342039418b97c61122456 2024-11-11T12:42:05,554 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b8cb0755261c4aae9bd99da79e311cb2 is 50, key is test_row_0/B:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:05,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741957_1133 (size=12301) 2024-11-11T12:42:05,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b8cb0755261c4aae9bd99da79e311cb2 2024-11-11T12:42:05,966 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/6d82b6f253394aec9972bfa796b08c90 is 50, key is test_row_0/C:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:05,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741958_1134 (size=12301) 2024-11-11T12:42:06,292 DEBUG [Thread-153 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x18cb251d to 127.0.0.1:54294 2024-11-11T12:42:06,292 DEBUG [Thread-153 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 92 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 89 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 87 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4951 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4928 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2135 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6405 rows 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2134 2024-11-11T12:42:06,292 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6402 rows 2024-11-11T12:42:06,292 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T12:42:06,293 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6fcb5f29 to 127.0.0.1:54294 2024-11-11T12:42:06,293 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:06,298 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-11T12:42:06,303 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-11T12:42:06,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:06,313 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328926313"}]},"ts":"1731328926313"} 2024-11-11T12:42:06,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:06,315 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-11T12:42:06,317 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-11T12:42:06,319 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:42:06,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, UNASSIGN}] 2024-11-11T12:42:06,324 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=31, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, UNASSIGN 2024-11-11T12:42:06,325 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=0a6a9f82df0ac9ece8343137343e2f72, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:06,326 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:42:06,326 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE; CloseRegionProcedure 0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:06,372 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=31.31 KB at sequenceid=560 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/6d82b6f253394aec9972bfa796b08c90 2024-11-11T12:42:06,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/0a81e8c2500342039418b97c61122456 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456 2024-11-11T12:42:06,381 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456, entries=150, sequenceid=560, filesize=12.0 K 2024-11-11T12:42:06,382 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/b8cb0755261c4aae9bd99da79e311cb2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2 2024-11-11T12:42:06,386 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2, entries=150, sequenceid=560, filesize=12.0 K 2024-11-11T12:42:06,387 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/6d82b6f253394aec9972bfa796b08c90 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90 2024-11-11T12:42:06,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90, entries=150, sequenceid=560, filesize=12.0 K 2024-11-11T12:42:06,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished 
flush of dataSize ~93.93 KB/96180, heapSize ~246.80 KB/252720, currentSize=20.13 KB/20610 for 0a6a9f82df0ac9ece8343137343e2f72 in 1257ms, sequenceid=560, compaction requested=true 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:06,392 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0a6a9f82df0ac9ece8343137343e2f72:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:06,392 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:06,393 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:06,394 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/A is initiating minor compaction (all files) 2024-11-11T12:42:06,394 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/A in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:06,394 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/9c15eb3169b84a2c9c2258eb1d6444db, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.3 K 2024-11-11T12:42:06,394 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:06,394 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/B is initiating minor compaction (all files) 2024-11-11T12:42:06,394 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/B in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:06,394 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9c15eb3169b84a2c9c2258eb1d6444db, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922760 2024-11-11T12:42:06,394 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/793e2916b13642eb8d8b297f72dabb2f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.3 K 2024-11-11T12:42:06,395 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 793e2916b13642eb8d8b297f72dabb2f, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922760 2024-11-11T12:42:06,395 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ff15b7d8ccf64852bee3599df5a42ec4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1731328922885 2024-11-11T12:42:06,395 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0a81e8c2500342039418b97c61122456, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1731328924020 2024-11-11T12:42:06,395 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.Compactor(224): Compacting 9feb3ea8a5ba4a6e8597b307bc2851fd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1731328922885 2024-11-11T12:42:06,396 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b8cb0755261c4aae9bd99da79e311cb2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1731328924020 2024-11-11T12:42:06,416 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#A#compaction#120 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:06,417 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/30821cf99718467ea169cf52b9602444 is 50, key is test_row_0/A:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:06,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741959_1135 (size=13663) 2024-11-11T12:42:06,431 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0a6a9f82df0ac9ece8343137343e2f72#B#compaction#121 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:06,431 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ca3050a7fe9346739b613f7be4efea3d is 50, key is test_row_0/B:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:06,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741960_1136 (size=13663) 2024-11-11T12:42:06,443 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/ca3050a7fe9346739b613f7be4efea3d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ca3050a7fe9346739b613f7be4efea3d 2024-11-11T12:42:06,449 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/B of 0a6a9f82df0ac9ece8343137343e2f72 into ca3050a7fe9346739b613f7be4efea3d(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:06,449 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:06,450 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/B, priority=13, startTime=1731328926392; duration=0sec 2024-11-11T12:42:06,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:06,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:B 2024-11-11T12:42:06,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:06,451 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38163 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:06,451 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 0a6a9f82df0ac9ece8343137343e2f72/C is initiating minor compaction (all files) 2024-11-11T12:42:06,452 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 0a6a9f82df0ac9ece8343137343e2f72/C in TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:06,452 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f8f3a34226d74c5383f6be4308ab376c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp, totalSize=37.3 K 2024-11-11T12:42:06,452 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f8f3a34226d74c5383f6be4308ab376c, keycount=150, bloomtype=ROW, size=13.2 K, encoding=NONE, compression=NONE, seqNum=516, earliestPutTs=1731328922760 2024-11-11T12:42:06,453 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e8239b09e8b74e34ade2924ea31d0053, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=540, earliestPutTs=1731328922885 2024-11-11T12:42:06,453 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6d82b6f253394aec9972bfa796b08c90, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=560, earliestPutTs=1731328924020 2024-11-11T12:42:06,463 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
0a6a9f82df0ac9ece8343137343e2f72#C#compaction#122 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:06,464 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/74d4d14da2e84906ad597c37bfa79413 is 50, key is test_row_0/C:col10/1731328925135/Put/seqid=0 2024-11-11T12:42:06,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741961_1137 (size=13663) 2024-11-11T12:42:06,477 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/74d4d14da2e84906ad597c37bfa79413 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/74d4d14da2e84906ad597c37bfa79413 2024-11-11T12:42:06,481 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:06,483 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/C of 0a6a9f82df0ac9ece8343137343e2f72 into 74d4d14da2e84906ad597c37bfa79413(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:06,483 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:06,483 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/C, priority=13, startTime=1731328926392; duration=0sec 2024-11-11T12:42:06,483 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:06,483 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:C 2024-11-11T12:42:06,485 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(124): Close 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:06,485 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:42:06,486 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1681): Closing 0a6a9f82df0ac9ece8343137343e2f72, disabling compactions & flushes 2024-11-11T12:42:06,486 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1942): waiting for 1 compactions to complete for region TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:06,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:06,831 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/30821cf99718467ea169cf52b9602444 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/30821cf99718467ea169cf52b9602444 2024-11-11T12:42:06,836 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 0a6a9f82df0ac9ece8343137343e2f72/A of 0a6a9f82df0ac9ece8343137343e2f72 into 30821cf99718467ea169cf52b9602444(size=13.3 K), total size for store is 13.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:06,836 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:06,836 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72., storeName=0a6a9f82df0ac9ece8343137343e2f72/A, priority=13, startTime=1731328926392; duration=0sec 2024-11-11T12:42:06,836 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:06,836 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:06,836 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:06,836 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0a6a9f82df0ac9ece8343137343e2f72:A 2024-11-11T12:42:06,836 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. after waiting 0 ms 2024-11-11T12:42:06,836 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 
2024-11-11T12:42:06,836 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(2837): Flushing 0a6a9f82df0ac9ece8343137343e2f72 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-11T12:42:06,836 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=A 2024-11-11T12:42:06,836 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:06,837 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=B 2024-11-11T12:42:06,837 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:06,837 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 0a6a9f82df0ac9ece8343137343e2f72, store=C 2024-11-11T12:42:06,837 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:06,840 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/479f1ede3f2c41cc9376933e015777f1 is 50, key is test_row_0/A:col10/1731328925140/Put/seqid=0 2024-11-11T12:42:06,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741962_1138 (size=12301) 2024-11-11T12:42:06,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:07,245 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/479f1ede3f2c41cc9376933e015777f1 2024-11-11T12:42:07,252 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c219c3d31267444e83948ac627853528 is 50, key is test_row_0/B:col10/1731328925140/Put/seqid=0 2024-11-11T12:42:07,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741963_1139 (size=12301) 2024-11-11T12:42:07,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:07,657 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 
{event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c219c3d31267444e83948ac627853528 2024-11-11T12:42:07,664 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/7ab898bf78c64a7d80e153620c20be97 is 50, key is test_row_0/C:col10/1731328925140/Put/seqid=0 2024-11-11T12:42:07,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741964_1140 (size=12301) 2024-11-11T12:42:08,068 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=569 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/7ab898bf78c64a7d80e153620c20be97 2024-11-11T12:42:08,074 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/A/479f1ede3f2c41cc9376933e015777f1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/479f1ede3f2c41cc9376933e015777f1 2024-11-11T12:42:08,079 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/479f1ede3f2c41cc9376933e015777f1, entries=150, sequenceid=569, filesize=12.0 K 2024-11-11T12:42:08,081 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/B/c219c3d31267444e83948ac627853528 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c219c3d31267444e83948ac627853528 2024-11-11T12:42:08,085 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c219c3d31267444e83948ac627853528, entries=150, sequenceid=569, filesize=12.0 K 2024-11-11T12:42:08,086 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/.tmp/C/7ab898bf78c64a7d80e153620c20be97 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/7ab898bf78c64a7d80e153620c20be97 2024-11-11T12:42:08,091 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/7ab898bf78c64a7d80e153620c20be97, entries=150, sequenceid=569, filesize=12.0 K 2024-11-11T12:42:08,092 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 0a6a9f82df0ac9ece8343137343e2f72 in 1256ms, sequenceid=569, compaction requested=false 2024-11-11T12:42:08,093 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e43491779e7c45b18fd160a121b47c67, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8b41402ddb154874a7f7c262fcfccb94, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2aaf8bfee97641bba6b22aabc2c732ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/dfb70cfdf22047c3927b4e39a5725a61, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f3a065dd542c4124949e23ee25867a0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/67dbab01b633428b9496322b66002bbe, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ea68954294ca4ba2a8a18e424d44b4b1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/c16accc145144b8b8f89e4010b1affe0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8df0c1838be94cff92d110d90851c4cf, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/53f409bcf6294ea08c36f192a845d082, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/9c15eb3169b84a2c9c2258eb1d6444db, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456] to archive 2024-11-11T12:42:08,097 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
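(Editor's note: the StoreCloser entries that follow archive each compacted store file by renaming it from the region's data directory to the mirrored path under the archive root, as reported by backup.HFileArchiver. The sketch below illustrates that archive step using the public Hadoop FileSystem API rather than HBase's HFileArchiver; the class name, helper method, and the concrete paths in main are illustrative assumptions, not taken from the test.)

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ArchiveCompactedFiles {
    /** Renames each compacted file into the archive store directory, mirroring its file name. */
    public static void archive(FileSystem fs, Path archiveStoreDir, List<Path> compactedFiles)
            throws IOException {
        if (!fs.mkdirs(archiveStoreDir)) {
            throw new IOException("Could not create archive dir " + archiveStoreDir);
        }
        for (Path file : compactedFiles) {
            Path target = new Path(archiveStoreDir, file.getName());
            // rename is a metadata-only move within the same HDFS namespace,
            // so archiving stays cheap even for large HFiles
            if (!fs.rename(file, target)) {
                throw new IOException("Failed to archive " + file + " to " + target);
            }
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration(); // picks up fs.defaultFS, e.g. an hdfs://... namenode
        FileSystem fs = FileSystem.get(conf);
        // Illustrative paths only; a real region's store and archive directories would be resolved from the table layout.
        Path storeDir = new Path("/data/default/ExampleTable/exampleRegion/A");
        Path archiveDir = new Path("/archive/data/default/ExampleTable/exampleRegion/A");
        archive(fs, archiveDir, List.of(new Path(storeDir, "exampleCompactedFile")));
    }
}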
2024-11-11T12:42:08,103 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b43e7f9cd7e6420a8644f311bb811aeb 2024-11-11T12:42:08,105 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/fa7db528b8a94127bef905a27e25f742 2024-11-11T12:42:08,106 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4c608721ed654cef97a5bc9e59acac72 2024-11-11T12:42:08,108 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/5a3e25f98fa847e6a460e7e4de0ff0ec 2024-11-11T12:42:08,110 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e43491779e7c45b18fd160a121b47c67 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e43491779e7c45b18fd160a121b47c67 2024-11-11T12:42:08,112 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/595810c80a84428888e00893f35cc2db 2024-11-11T12:42:08,116 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8b41402ddb154874a7f7c262fcfccb94 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8b41402ddb154874a7f7c262fcfccb94 2024-11-11T12:42:08,118 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2ab5dd805fad4a789fb1a43f440514e3 2024-11-11T12:42:08,120 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a67745ec0e534241b253aec1af322d56 2024-11-11T12:42:08,122 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/739b9cb76e93401185049eb82d3d2231 2024-11-11T12:42:08,125 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2aaf8bfee97641bba6b22aabc2c732ff to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/2aaf8bfee97641bba6b22aabc2c732ff 2024-11-11T12:42:08,127 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/212d721c1597484fa3c8e3806edfd384 2024-11-11T12:42:08,129 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/140f9a56bced4bb89fc29a4a0c01d191 2024-11-11T12:42:08,131 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/4e6ab55b40ee4bc0a845e72de17ea408 2024-11-11T12:42:08,132 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/dfb70cfdf22047c3927b4e39a5725a61 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/dfb70cfdf22047c3927b4e39a5725a61 2024-11-11T12:42:08,135 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/80dbbb17ff594096b0437a3226934c4e 2024-11-11T12:42:08,136 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3e0644706af14c63bc575da469286f20 2024-11-11T12:42:08,138 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e26410145d4f41d698f82ae7bb8e2067 2024-11-11T12:42:08,140 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f3a065dd542c4124949e23ee25867a0e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f3a065dd542c4124949e23ee25867a0e 2024-11-11T12:42:08,141 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/6aaf745770d142ffa3130539a2d9f17a 2024-11-11T12:42:08,143 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f0c8f27e57d3414980f6d1da4c25298e 2024-11-11T12:42:08,145 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/67dbab01b633428b9496322b66002bbe to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/67dbab01b633428b9496322b66002bbe 2024-11-11T12:42:08,147 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/a4814220e7ac4861bc502d3d51723e7a 2024-11-11T12:42:08,153 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/13a11fab1cf64541890ac28720e61b32 2024-11-11T12:42:08,168 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/da2ac016e0aa483aa55088658ba5e9af 2024-11-11T12:42:08,170 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ea68954294ca4ba2a8a18e424d44b4b1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ea68954294ca4ba2a8a18e424d44b4b1 2024-11-11T12:42:08,172 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/3d21d419015040eaa42c8f4dc3830b0f 2024-11-11T12:42:08,174 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/b1de8f17053e41b7920e727aad397c2d 2024-11-11T12:42:08,175 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/c16accc145144b8b8f89e4010b1affe0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/c16accc145144b8b8f89e4010b1affe0 2024-11-11T12:42:08,177 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/db5d6a4a04b24fd58373f29b7558f2c7 2024-11-11T12:42:08,178 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8df0c1838be94cff92d110d90851c4cf to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/8df0c1838be94cff92d110d90851c4cf 2024-11-11T12:42:08,180 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/e02edd313cc049fabfd68b14c87de386 2024-11-11T12:42:08,181 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/1346ae30a1a842b99353f291551c0316 2024-11-11T12:42:08,183 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/53f409bcf6294ea08c36f192a845d082 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/53f409bcf6294ea08c36f192a845d082 2024-11-11T12:42:08,184 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/f74ef7fc4d6642e4bc38ab63cb3d392e 2024-11-11T12:42:08,185 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/699601e8e80c42f7bb24030b2b40ef16 2024-11-11T12:42:08,187 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/d6c9fdf21ad94846a22f15a22d641fa9 2024-11-11T12:42:08,188 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/9c15eb3169b84a2c9c2258eb1d6444db to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/9c15eb3169b84a2c9c2258eb1d6444db 2024-11-11T12:42:08,190 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/ff15b7d8ccf64852bee3599df5a42ec4 2024-11-11T12:42:08,191 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/0a81e8c2500342039418b97c61122456 2024-11-11T12:42:08,210 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/13754117f51946ffbf487e065e75ade4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ff2938e9148542d4bfe1c1ac34764dfd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/95f43ce6616d4e448bb7826a310ca256, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f29a09338e1141b0a415d0d0de27adc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4dfd418c85ad4ad0a059a184ad233d21, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/77f42f0d39b74daea6ca4080351819ab, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/425c98e1c65148d293fd874eaf24fd8a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3a77e60ddc0a43629c98c7d180cbdbb3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/084f5f8b68c1430db71aafff10759dfa, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4a313b2c6f5a44b1ac1ff69053e1dcc6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/793e2916b13642eb8d8b297f72dabb2f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2] to archive 2024-11-11T12:42:08,211 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T12:42:08,213 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f380097b5c614a92af7e79cbf6323415 2024-11-11T12:42:08,215 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/421e5b3244be4c7ca9b5ecf7954831a1 2024-11-11T12:42:08,216 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fdbfbeb7b62349ba99815838c8da1d52 2024-11-11T12:42:08,218 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/13754117f51946ffbf487e065e75ade4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/13754117f51946ffbf487e065e75ade4 2024-11-11T12:42:08,219 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/6b9b00cf7632497d8ef21e004f1caf80 2024-11-11T12:42:08,220 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/eec19bb8cf584693a2aa3ad7dac623cf 2024-11-11T12:42:08,222 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ff2938e9148542d4bfe1c1ac34764dfd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ff2938e9148542d4bfe1c1ac34764dfd 2024-11-11T12:42:08,223 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/2cf0fee2544c4cd9b80bb7b5f9428a8c 2024-11-11T12:42:08,224 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c6809e4020064ef286bc65bd88923d7d 2024-11-11T12:42:08,225 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4c0c913b27db42a9b003bdbf2abb7a03 2024-11-11T12:42:08,227 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/95f43ce6616d4e448bb7826a310ca256 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/95f43ce6616d4e448bb7826a310ca256 2024-11-11T12:42:08,228 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/bb39bcb80b4a4255a39c61b1f0a317ce 2024-11-11T12:42:08,230 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/28a039f20feb4230add972ef87868bfd 2024-11-11T12:42:08,232 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f29a09338e1141b0a415d0d0de27adc2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/f29a09338e1141b0a415d0d0de27adc2 2024-11-11T12:42:08,233 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3bd0bfec634a4a50a7f934d762555bb9 2024-11-11T12:42:08,235 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/0e87f5c58036436d98b0e690684a9fe4 2024-11-11T12:42:08,236 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/52b4cf83763349f9828582120cd737d3 2024-11-11T12:42:08,241 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4dfd418c85ad4ad0a059a184ad233d21 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4dfd418c85ad4ad0a059a184ad233d21 2024-11-11T12:42:08,242 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/fef0ae4cd925473f9ccf12510a913d13 2024-11-11T12:42:08,244 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/097207f56fa24fd7a42aa7a1fdd34450 2024-11-11T12:42:08,245 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3fdd73c2ec24423486a8794eaf01016b 2024-11-11T12:42:08,247 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/77f42f0d39b74daea6ca4080351819ab to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/77f42f0d39b74daea6ca4080351819ab 2024-11-11T12:42:08,249 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/5cee728065e14db18d61087e74327ca7 2024-11-11T12:42:08,250 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/538092bfd2e748d0a1ffcd80925ff26f 2024-11-11T12:42:08,252 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/425c98e1c65148d293fd874eaf24fd8a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/425c98e1c65148d293fd874eaf24fd8a 2024-11-11T12:42:08,253 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b5d772983ef14fafbba416a36288bd8b 2024-11-11T12:42:08,254 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/32ac4c2b45084e6d978e48c97f1980df 2024-11-11T12:42:08,255 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3a77e60ddc0a43629c98c7d180cbdbb3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/3a77e60ddc0a43629c98c7d180cbdbb3 2024-11-11T12:42:08,256 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/636ecde85bde44489f00b79a03a0a41b 2024-11-11T12:42:08,257 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/89360db6cc4e486e8f0d1094a5288e6e 2024-11-11T12:42:08,258 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/084f5f8b68c1430db71aafff10759dfa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/084f5f8b68c1430db71aafff10759dfa 2024-11-11T12:42:08,259 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/7ac996e5a47140bf92e3edf5cf298e8d 2024-11-11T12:42:08,260 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/d7f5eb4777ca4037a848d32e8844d74a 2024-11-11T12:42:08,261 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4a313b2c6f5a44b1ac1ff69053e1dcc6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/4a313b2c6f5a44b1ac1ff69053e1dcc6 2024-11-11T12:42:08,262 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ae86e81b69ad4b2080e602523a641948 2024-11-11T12:42:08,264 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/843b1b55c3cb4dd7a014d16888402ee9 2024-11-11T12:42:08,265 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/793e2916b13642eb8d8b297f72dabb2f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/793e2916b13642eb8d8b297f72dabb2f 2024-11-11T12:42:08,266 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b132f333daba4fa096bd8bc1897d97b9 2024-11-11T12:42:08,267 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/9feb3ea8a5ba4a6e8597b307bc2851fd 2024-11-11T12:42:08,267 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/b8cb0755261c4aae9bd99da79e311cb2 2024-11-11T12:42:08,269 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/62a3956feea64c3ea367b5a9cab87ae5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/49b2501af9d946d89928dc7b48af7c85, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/56740a23db4b4adcb627e76f2aa29208, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/46439a2f45d2460eb751aa81a96dd3c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ef821e9b4cca4d27974cda9a5e0d097f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f05e5378f994f598361b65d5d21c9a0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/765bba0ac80e43aea01f1a17101a28b3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/462b91d9c53e4f22b18b9b57ee92f9f2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/08c23f048eca4cd69f22f42118c48a49, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/efd83bab1b4b454d97da89daea7841a9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f8f3a34226d74c5383f6be4308ab376c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90] to archive 2024-11-11T12:42:08,270 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
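[editor's note] The StoreCloser entries above show the archival pattern used when the TestAcidGuarantees region is closed: compacted store files are not deleted but moved from the region's data directory to a mirrored location under archive/, preserving the namespace/table/region/family/file layout. The short Java sketch below is illustrative only (the ROOT constant and toArchivePath helper are assumptions made for this example, not HBase's actual HFileArchiver code); it reproduces the data/ -> archive/ path mapping visible in the DEBUG lines before and after this note.

// Illustrative sketch, not HBase source: mirrors the data/ -> archive/ mapping
// reported by the "Archived from FileableStoreFile, <src> to <dst>" log lines.
public final class ArchivePathSketch {

    // Root of the test filesystem as it appears in the log lines above.
    private static final String ROOT =
        "hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18";

    // Map a store file under <ROOT>/data/... to the mirrored path under <ROOT>/archive/data/...
    // Only the root "data" segment changes; namespace/table/region/family/file stay intact.
    static String toArchivePath(String storeFilePath) {
        String prefix = ROOT + "/data/";
        if (!storeFilePath.startsWith(prefix)) {
            throw new IllegalArgumentException("not under " + prefix + ": " + storeFilePath);
        }
        return ROOT + "/archive/data/" + storeFilePath.substring(prefix.length());
    }

    public static void main(String[] args) {
        String src = ROOT + "/data/default/TestAcidGuarantees/"
            + "0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3";
        // Prints the same source -> destination pair shape as the DEBUG entries in this log.
        System.out.println(src + " -> " + toArchivePath(src));
    }
}

Running the sketch prints the same source-to-destination pair that the first HFileArchiver(596) entry after this note records for the C family; the region's own data copy is then removed once every store file has been relocated. [end editor's note]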
2024-11-11T12:42:08,271 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/d9aea97c714c4f2b80e46314203afec3 2024-11-11T12:42:08,272 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/540dcfe32bf144478f794580454e5922 2024-11-11T12:42:08,273 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/293e90c3ddd847d7a2405afda7404815 2024-11-11T12:42:08,274 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/62a3956feea64c3ea367b5a9cab87ae5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/62a3956feea64c3ea367b5a9cab87ae5 2024-11-11T12:42:08,275 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f0f0394872f40c682e9b01a1a351917 2024-11-11T12:42:08,276 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/652993a0ac124b73b839181033be81a6 2024-11-11T12:42:08,277 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/49b2501af9d946d89928dc7b48af7c85 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/49b2501af9d946d89928dc7b48af7c85 2024-11-11T12:42:08,278 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8e8cebdd81554fc59810bc2971a656b6 2024-11-11T12:42:08,279 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/60768f30dce74062b5b05a2056ca116d 2024-11-11T12:42:08,280 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/3e294efcbaa04d3abe887252cb206fc2 2024-11-11T12:42:08,282 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/56740a23db4b4adcb627e76f2aa29208 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/56740a23db4b4adcb627e76f2aa29208 2024-11-11T12:42:08,284 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/93ef6e858f3747eb9b6d75700ccf25bd 2024-11-11T12:42:08,285 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/5d5a93697497481dbda18c20b60a18b9 2024-11-11T12:42:08,287 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/46439a2f45d2460eb751aa81a96dd3c3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/46439a2f45d2460eb751aa81a96dd3c3 2024-11-11T12:42:08,290 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1335f177bd0a4c97a9f3bf1ec983e2ba 2024-11-11T12:42:08,291 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a74ff1da92ea4ec4be882435eec03c35 2024-11-11T12:42:08,292 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/191ab731b88746cea8de41ba7f720df1 2024-11-11T12:42:08,293 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ef821e9b4cca4d27974cda9a5e0d097f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ef821e9b4cca4d27974cda9a5e0d097f 2024-11-11T12:42:08,295 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8c9a71df51c34a968069e3b2d41d19a5 2024-11-11T12:42:08,296 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e5b75e23d78b4ef6945c41271415ea99 2024-11-11T12:42:08,298 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/a01863d085e645be9dc02e3612c9bb4b 2024-11-11T12:42:08,299 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f05e5378f994f598361b65d5d21c9a0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0f05e5378f994f598361b65d5d21c9a0 2024-11-11T12:42:08,300 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1e01a4ae3de54e56ba6bcd9dd41e25ec 2024-11-11T12:42:08,302 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/8bd87703cc59466ca6d2c259bdeaf9be 2024-11-11T12:42:08,303 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/765bba0ac80e43aea01f1a17101a28b3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/765bba0ac80e43aea01f1a17101a28b3 2024-11-11T12:42:08,304 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/1d85c460f9a84041b25af99ea5cc594b 2024-11-11T12:42:08,305 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/ea5c878be7694dba95371cdc619239c8 2024-11-11T12:42:08,307 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/462b91d9c53e4f22b18b9b57ee92f9f2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/462b91d9c53e4f22b18b9b57ee92f9f2 2024-11-11T12:42:08,308 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b6f0d91439e44a7e9959cc9d2a90698e 2024-11-11T12:42:08,309 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e21957cdc0834952bb63eec044a972e4 2024-11-11T12:42:08,310 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/08c23f048eca4cd69f22f42118c48a49 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/08c23f048eca4cd69f22f42118c48a49 2024-11-11T12:42:08,311 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/b46ecb17df784b1b966eb5fcd728100d 2024-11-11T12:42:08,312 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/422ec05252644b9c927ac4ff9cef2f9a 2024-11-11T12:42:08,313 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/efd83bab1b4b454d97da89daea7841a9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/efd83bab1b4b454d97da89daea7841a9 2024-11-11T12:42:08,315 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/0db273232e444c4981a3118b1bc14742 2024-11-11T12:42:08,316 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f0a622c967d04b2cbb7d82b8131ae323 2024-11-11T12:42:08,317 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f8f3a34226d74c5383f6be4308ab376c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/f8f3a34226d74c5383f6be4308ab376c 2024-11-11T12:42:08,318 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/592feb8164814fa1ad9141813f928afa 2024-11-11T12:42:08,319 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/e8239b09e8b74e34ade2924ea31d0053 2024-11-11T12:42:08,320 DEBUG [StoreCloser-TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/6d82b6f253394aec9972bfa796b08c90 2024-11-11T12:42:08,326 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/recovered.edits/572.seqid, newMaxSeqId=572, maxSeqId=1 2024-11-11T12:42:08,328 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72. 2024-11-11T12:42:08,329 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] regionserver.HRegion(1635): Region close journal for 0a6a9f82df0ac9ece8343137343e2f72: 2024-11-11T12:42:08,330 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=33}] handler.UnassignRegionHandler(170): Closed 0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:08,331 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=0a6a9f82df0ac9ece8343137343e2f72, regionState=CLOSED 2024-11-11T12:42:08,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=32 2024-11-11T12:42:08,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=32, state=SUCCESS; CloseRegionProcedure 0a6a9f82df0ac9ece8343137343e2f72, server=32e78532c8b1,44673,1731328897232 in 2.0060 sec 2024-11-11T12:42:08,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=31 2024-11-11T12:42:08,335 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=31, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=0a6a9f82df0ac9ece8343137343e2f72, UNASSIGN in 2.0110 sec 2024-11-11T12:42:08,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=31, resume processing ppid=30 2024-11-11T12:42:08,337 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.0170 sec 2024-11-11T12:42:08,338 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328928338"}]},"ts":"1731328928338"} 2024-11-11T12:42:08,339 INFO 
[PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:42:08,341 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:42:08,343 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0370 sec 2024-11-11T12:42:08,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-11-11T12:42:08,421 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 30 completed 2024-11-11T12:42:08,425 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:42:08,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,430 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=34, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,431 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=34, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-11T12:42:08,435 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:08,439 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/recovered.edits] 2024-11-11T12:42:08,442 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/30821cf99718467ea169cf52b9602444 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/30821cf99718467ea169cf52b9602444 2024-11-11T12:42:08,443 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/479f1ede3f2c41cc9376933e015777f1 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/A/479f1ede3f2c41cc9376933e015777f1 2024-11-11T12:42:08,446 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c219c3d31267444e83948ac627853528 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/c219c3d31267444e83948ac627853528 2024-11-11T12:42:08,448 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ca3050a7fe9346739b613f7be4efea3d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/B/ca3050a7fe9346739b613f7be4efea3d 2024-11-11T12:42:08,450 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/74d4d14da2e84906ad597c37bfa79413 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/74d4d14da2e84906ad597c37bfa79413 2024-11-11T12:42:08,452 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/7ab898bf78c64a7d80e153620c20be97 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/C/7ab898bf78c64a7d80e153620c20be97 2024-11-11T12:42:08,455 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/recovered.edits/572.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72/recovered.edits/572.seqid 2024-11-11T12:42:08,456 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/0a6a9f82df0ac9ece8343137343e2f72 2024-11-11T12:42:08,456 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:42:08,461 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=34, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,466 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-11T12:42:08,472 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:42:08,509 DEBUG 
[PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-11T12:42:08,510 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=34, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,510 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-11T12:42:08,510 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731328928510"}]},"ts":"9223372036854775807"} 2024-11-11T12:42:08,517 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:42:08,517 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 0a6a9f82df0ac9ece8343137343e2f72, NAME => 'TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:42:08,517 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-11T12:42:08,517 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731328928517"}]},"ts":"9223372036854775807"} 2024-11-11T12:42:08,520 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:42:08,525 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=34, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 100 msec 2024-11-11T12:42:08,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=34 2024-11-11T12:42:08,532 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 34 completed 2024-11-11T12:42:08,551 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMixedAtomicity Thread=241 (was 219) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:49454 [Waiting for operation #271] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:40766 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) 
app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1424648804_22 at /127.0.0.1:40782 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS:0;32e78532c8b1:44673-shortCompactions-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.PriorityBlockingQueue.take(PriorityBlockingQueue.java:535) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:55010 [Waiting for operation #258] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1424648804_22 at /127.0.0.1:55002 [Waiting for operation #256] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=462 (was 444) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=754 (was 695) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=2454 (was 3156) 2024-11-11T12:42:08,563 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=241, OpenFileDescriptor=462, MaxFileDescriptor=1048576, SystemLoadAverage=754, ProcessCount=11, AvailableMemoryMB=2451 2024-11-11T12:42:08,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-11T12:42:08,566 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:42:08,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:08,574 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:42:08,574 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:08,574 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 35 2024-11-11T12:42:08,575 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:42:08,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-11T12:42:08,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741965_1141 (size=960) 2024-11-11T12:42:08,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-11T12:42:08,878 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-11T12:42:09,001 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:42:09,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741966_1142 (size=53) 2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing fe7e7af7c234f1775e0b775751ee14f9, disabling compactions & flushes 2024-11-11T12:42:09,032 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. after waiting 0 ms 2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,032 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:09,032 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:09,034 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:42:09,035 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731328929034"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731328929034"}]},"ts":"1731328929034"} 2024-11-11T12:42:09,038 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T12:42:09,047 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:42:09,047 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328929047"}]},"ts":"1731328929047"} 2024-11-11T12:42:09,049 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:42:09,065 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, ASSIGN}] 2024-11-11T12:42:09,073 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, ASSIGN 2024-11-11T12:42:09,074 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=36, ppid=35, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:42:09,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-11T12:42:09,225 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:09,227 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=37, ppid=36, state=RUNNABLE; OpenRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:09,379 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:09,382 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
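As a rough illustration of the table being created in the records above — three column families A, B and C, single version each, BASIC in-memory compaction, and the 131072-byte flush size that TableDescriptorChecker warns about — a minimal HBase 2.x Java client sketch might look like the following. Only the attribute values are taken from the log; the class name, connection wiring, and everything else are hypothetical.

    // Illustrative sketch only; assumes an HBase 2.x client on the classpath.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateAcidTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          TableDescriptorBuilder builder =
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                  // BASIC in-memory compaction, as in the logged TABLE_ATTRIBUTES
                  .setValue("hbase.hregion.compacting.memstore.type", "BASIC")
                  // 131072 bytes (128 KB): the small flush size the checker warns about
                  .setMemStoreFlushSize(131072);
          for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                    .build());
          }
          admin.createTable(builder.build());
        }
      }
    }

Submitting such a descriptor would produce the same kind of CreateTableProcedure / TransitRegionStateProcedure sequence recorded here (pid=35, 36, 37).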
2024-11-11T12:42:09,383 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7285): Opening region: {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:42:09,383 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,383 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:42:09,383 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7327): checking encryption for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,383 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(7330): checking classloading for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,384 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,386 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:09,386 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName A 2024-11-11T12:42:09,386 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:09,387 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:09,387 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,388 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:09,388 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:42421,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:09,389 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName B 2024-11-11T12:42:09,389 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:09,389 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:09,389 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,390 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:09,390 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName C 2024-11-11T12:42:09,390 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:09,391 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:09,391 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,392 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,392 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,393 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-11T12:42:09,394 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1085): writing seq id for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,396 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:42:09,397 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1102): Opened fe7e7af7c234f1775e0b775751ee14f9; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75484526, jitterRate=0.12480708956718445}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:42:09,398 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegion(1001): Region open journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:09,398 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., pid=37, masterSystemTime=1731328929379 2024-11-11T12:42:09,400 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,400 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=37}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
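For reference, the desiredMaxFileSize printed for ConstantSizeRegionSplitPolicy above is consistent with a 64 MB base region size adjusted by the logged jitter, assuming the jitter is applied multiplicatively: 67108864 x (1 + 0.12480708956718445) ≈ 75484526 bytes. The 64 MB base is an inference from these two numbers, not something stated in the log itself.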
2024-11-11T12:42:09,401 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=36 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:09,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=37, resume processing ppid=36 2024-11-11T12:42:09,404 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=36, state=SUCCESS; OpenRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 in 175 msec 2024-11-11T12:42:09,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-11-11T12:42:09,406 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, ASSIGN in 339 msec 2024-11-11T12:42:09,406 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:42:09,407 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328929406"}]},"ts":"1731328929406"} 2024-11-11T12:42:09,408 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:42:09,411 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=35, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:42:09,412 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 845 msec 2024-11-11T12:42:09,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-11-11T12:42:09,680 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 35 completed 2024-11-11T12:42:09,683 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a9b9802 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@118b007e 2024-11-11T12:42:09,687 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d29de25, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:09,690 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:09,692 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:57324, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:09,694 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:42:09,696 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44226, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:42:09,704 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-11T12:42:09,705 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:42:09,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:09,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741967_1143 (size=996) 2024-11-11T12:42:09,758 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-11T12:42:09,759 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-11T12:42:09,773 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:42:09,791 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, REOPEN/MOVE}] 2024-11-11T12:42:09,792 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, REOPEN/MOVE 2024-11-11T12:42:09,792 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:09,794 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:42:09,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=41, ppid=40, state=RUNNABLE; CloseRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:09,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:09,947 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(124): Close fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,947 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:42:09,947 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1681): Closing fe7e7af7c234f1775e0b775751ee14f9, disabling compactions & flushes 2024-11-11T12:42:09,947 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,947 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,947 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. after waiting 0 ms 2024-11-11T12:42:09,947 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:09,952 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-11T12:42:09,953 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:09,953 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegion(1635): Region close journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:09,953 WARN [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] regionserver.HRegionServer(3786): Not adding moved region record: fe7e7af7c234f1775e0b775751ee14f9 to self. 2024-11-11T12:42:09,958 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=CLOSED 2024-11-11T12:42:09,958 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=41}] handler.UnassignRegionHandler(170): Closed fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:09,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=41, resume processing ppid=40 2024-11-11T12:42:09,962 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, ppid=40, state=SUCCESS; CloseRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 in 166 msec 2024-11-11T12:42:09,963 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, REOPEN/MOVE; state=CLOSED, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=true 2024-11-11T12:42:10,114 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,115 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=40, state=RUNNABLE; OpenRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:10,269 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,273 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
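The modify-table request above switches column family A to a MOB-backed family (IS_MOB => 'true', MOB_THRESHOLD => '4'), which is what forces the REOPEN/MOVE of fe7e7af7c234f1775e0b775751ee14f9 seen in the surrounding records. A comparable change through the HBase 2.x Admin API might be sketched as below; the admin handle and class name are placeholders, and only the MOB attribute values come from the log.

    // Illustrative only: flips family 'A' of TestAcidGuarantees to MOB storage.
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobSketch {
      static void enableMobOnFamilyA(Admin admin) throws java.io.IOException {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        ColumnFamilyDescriptor mobA =
            ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                .setMaxVersions(1)   // VERSIONS => '1', unchanged from the original family
                .setMobEnabled(true) // IS_MOB => 'true'
                .setMobThreshold(4)  // MOB_THRESHOLD => '4' (bytes), as in the logged descriptor
                .build();
        // Modifying the family triggers a table modification; the master then
        // reopens the region, i.e. the CLOSE/OPEN procedures visible in this log.
        admin.modifyColumnFamily(table, mobA);
      }
    }
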
2024-11-11T12:42:10,273 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7285): Opening region: {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:42:10,274 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,274 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:42:10,274 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7327): checking encryption for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,274 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(7330): checking classloading for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,278 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,279 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:10,284 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName A 2024-11-11T12:42:10,286 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:10,287 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:10,287 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,288 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:10,289 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName B 2024-11-11T12:42:10,289 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:10,289 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:10,289 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,290 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:10,290 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fe7e7af7c234f1775e0b775751ee14f9 columnFamilyName C 2024-11-11T12:42:10,290 DEBUG [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:10,291 INFO [StoreOpener-fe7e7af7c234f1775e0b775751ee14f9-1 {}] regionserver.HStore(327): Store=fe7e7af7c234f1775e0b775751ee14f9/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:10,291 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:10,292 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,293 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,294 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:42:10,296 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1085): writing seq id for fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,297 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1102): Opened fe7e7af7c234f1775e0b775751ee14f9; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65022167, jitterRate=-0.031094208359718323}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:42:10,298 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegion(1001): Region open journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:10,299 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., pid=42, masterSystemTime=1731328930269 2024-11-11T12:42:10,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:10,301 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=42}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:10,302 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=40 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=OPEN, openSeqNum=5, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=42, resume processing ppid=40 2024-11-11T12:42:10,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=40, state=SUCCESS; OpenRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 in 188 msec 2024-11-11T12:42:10,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=39 2024-11-11T12:42:10,307 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=39, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, REOPEN/MOVE in 515 msec 2024-11-11T12:42:10,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=39, resume processing ppid=38 2024-11-11T12:42:10,310 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 536 msec 2024-11-11T12:42:10,314 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 604 msec 2024-11-11T12:42:10,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-11-11T12:42:10,323 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cae6c5c to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79982672 2024-11-11T12:42:10,491 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@433e2b26, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,493 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c820ef9 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7b4bd1ba 2024-11-11T12:42:10,497 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@176c5c1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,499 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0b44b1e5 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@454f1431 2024-11-11T12:42:10,526 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190853fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,527 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x42e904d8 to 
127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@505d5ccd 2024-11-11T12:42:10,530 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c5c4716, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,531 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a4c53ed to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@367f47f7 2024-11-11T12:42:10,534 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2885d2d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,535 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x247c0c93 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@22e911df 2024-11-11T12:42:10,538 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78cafade, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,539 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x517ff977 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3b727d6e 2024-11-11T12:42:10,542 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@14c16cd4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,544 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3448d233 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1c7940d9 2024-11-11T12:42:10,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@341384e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,547 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7a11164b to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c38ee58 2024-11-11T12:42:10,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26b120d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:10,557 DEBUG 
[hconnection-0x272ddab2-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,558 DEBUG [hconnection-0x7381168c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,558 DEBUG [hconnection-0x24793937-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,560 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40186, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,560 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40182, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:10,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees 2024-11-11T12:42:10,564 DEBUG [hconnection-0x19f2f03a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:10,565 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:10,566 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40192, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,567 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=43, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:10,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=43, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:10,568 DEBUG [hconnection-0x5d8fb141-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,568 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40200, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,570 DEBUG [hconnection-0x4289d2cd-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,570 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40208, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,571 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40220, version=2.7.0-SNAPSHOT, sasl=false, 
ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,575 DEBUG [hconnection-0x3a54e133-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,577 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40232, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,583 DEBUG [hconnection-0x37e7c97-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,584 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40240, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,586 DEBUG [hconnection-0x7ee0707e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:10,593 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:40246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:10,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,595 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:10,597 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:10,635 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328990631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,636 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328990631, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,637 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328990632, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328990634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328990636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:10,702 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111548f3cf6f0f245eca21c73ace45ec970_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_1/A:col10/1731328930593/Put/seqid=0 2024-11-11T12:42:10,719 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,720 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:10,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:10,720 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:10,721 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:10,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:10,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741968_1144 (size=9714) 2024-11-11T12:42:10,735 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:10,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328990738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,739 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328990738, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328990739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,740 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328990740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328990740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,746 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111548f3cf6f0f245eca21c73ace45ec970_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111548f3cf6f0f245eca21c73ace45ec970_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:10,747 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b5a9b9c4269442ad804c09cccc1ff2dd, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:10,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b5a9b9c4269442ad804c09cccc1ff2dd is 175, key is test_row_1/A:col10/1731328930593/Put/seqid=0 2024-11-11T12:42:10,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741969_1145 (size=22361) 2024-11-11T12:42:10,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:10,873 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,876 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:10,877 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:10,877 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:10,878 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:10,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328990941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328990941, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328990942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,944 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328990942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:10,945 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:10,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328990942, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,031 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:11,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:11,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,040 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,040 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:11,194 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,194 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:11,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:11,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,195 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,195 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,216 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=17, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b5a9b9c4269442ad804c09cccc1ff2dd 2024-11-11T12:42:11,249 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328991247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328991247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,250 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328991248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,253 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328991251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,266 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,267 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328991266, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,270 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1b2c067520cc4a389c792afcf6b4a301 is 50, key is test_row_1/B:col10/1731328930593/Put/seqid=0 2024-11-11T12:42:11,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741970_1146 (size=9657) 2024-11-11T12:42:11,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1b2c067520cc4a389c792afcf6b4a301 2024-11-11T12:42:11,349 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,350 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:11,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
as already flushing 2024-11-11T12:42:11,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:11,350 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] handler.RSProcedureHandler(58): pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,350 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=44 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=44 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:11,382 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/1dd50a8f5b89415298edce14eb09c231 is 50, key is test_row_1/C:col10/1731328930593/Put/seqid=0 2024-11-11T12:42:11,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741971_1147 (size=9657) 2024-11-11T12:42:11,432 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/1dd50a8f5b89415298edce14eb09c231 2024-11-11T12:42:11,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b5a9b9c4269442ad804c09cccc1ff2dd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd 2024-11-11T12:42:11,462 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd, entries=100, sequenceid=17, filesize=21.8 K 2024-11-11T12:42:11,467 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1b2c067520cc4a389c792afcf6b4a301 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301 2024-11-11T12:42:11,481 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301, entries=100, sequenceid=17, filesize=9.4 K 2024-11-11T12:42:11,482 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/1dd50a8f5b89415298edce14eb09c231 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231 2024-11-11T12:42:11,495 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231, entries=100, sequenceid=17, filesize=9.4 K 2024-11-11T12:42:11,496 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fe7e7af7c234f1775e0b775751ee14f9 in 901ms, sequenceid=17, compaction requested=false 2024-11-11T12:42:11,496 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:11,512 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,513 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=44 2024-11-11T12:42:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
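The flush that just completed drained roughly 67 KB from the memstore in about 900 ms, while concurrent Mutate calls kept being rejected with RegionTooBusyException because the region was over its 512 KB blocking limit (in a stock setup that limit is hbase.hregion.memstore.flush.size times hbase.hregion.memstore.block.multiplier; this test evidently runs with a deliberately tiny value). Below is a minimal client-side sketch of backing off until the flusher drains the memstore; the table, family, and qualifier names come from the log, while the row, value, retry count, and sleep times are illustrative assumptions, and in practice the HBase client normally retries this exception internally before it reaches application code.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionBackoffSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops back under the blocking limit
        } catch (RegionTooBusyException e) {
          // The region's memstore is over its blocking limit (reported as
          // "Over memstore limit=512.0 K" above); wait for MemStoreFlusher
          // to flush, then retry with exponential backoff.
          Thread.sleep(backoffMs);
          backoffMs *= 2;
        }
      }
    }
  }
}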
2024-11-11T12:42:11,513 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:11,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:11,514 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:11,566 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b925560164be4cd79960e46313f6b65d_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328930631/Put/seqid=0 2024-11-11T12:42:11,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741972_1148 (size=12154) 2024-11-11T12:42:11,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:11,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:11,775 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328991773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328991773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328991773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328991774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,777 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328991776, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,878 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328991878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,878 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328991878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328991878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:11,881 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:11,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328991881, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:12,018 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b925560164be4cd79960e46313f6b65d_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b925560164be4cd79960e46313f6b65d_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:12,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/89aef9a17c174728ae4fff7ea3a45654, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:12,021 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/89aef9a17c174728ae4fff7ea3a45654 is 175, key is test_row_0/A:col10/1731328930631/Put/seqid=0 2024-11-11T12:42:12,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741973_1149 (size=30955) 2024-11-11T12:42:12,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328992080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328992082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328992083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,089 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328992084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328992386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,391 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328992391, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,393 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328992393, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,401 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328992400, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,465 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/89aef9a17c174728ae4fff7ea3a45654 2024-11-11T12:42:12,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/c4e04d4a48754b08bc40d4acc3a433c6 is 50, key is test_row_0/B:col10/1731328930631/Put/seqid=0 2024-11-11T12:42:12,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741974_1150 (size=12001) 2024-11-11T12:42:12,518 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/c4e04d4a48754b08bc40d4acc3a433c6 2024-11-11T12:42:12,539 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/fda44a3cde0744029d7df9ae245229d0 is 50, key is 
test_row_0/C:col10/1731328930631/Put/seqid=0 2024-11-11T12:42:12,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741975_1151 (size=12001) 2024-11-11T12:42:12,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:12,781 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328992781, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,815 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-11T12:42:12,817 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49214, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-11T12:42:12,896 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328992895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,903 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328992898, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,905 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328992901, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,912 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:12,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328992911, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:12,979 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/fda44a3cde0744029d7df9ae245229d0 2024-11-11T12:42:12,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/89aef9a17c174728ae4fff7ea3a45654 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654 2024-11-11T12:42:13,014 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654, entries=150, sequenceid=40, filesize=30.2 K 2024-11-11T12:42:13,016 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/c4e04d4a48754b08bc40d4acc3a433c6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6 2024-11-11T12:42:13,022 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6, entries=150, sequenceid=40, filesize=11.7 K 2024-11-11T12:42:13,028 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/fda44a3cde0744029d7df9ae245229d0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0 2024-11-11T12:42:13,040 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0, entries=150, sequenceid=40, filesize=11.7 K 2024-11-11T12:42:13,043 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fe7e7af7c234f1775e0b775751ee14f9 in 1530ms, sequenceid=40, compaction requested=false 2024-11-11T12:42:13,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:13,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:13,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=44}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=44 2024-11-11T12:42:13,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=44 2024-11-11T12:42:13,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=43 2024-11-11T12:42:13,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4850 sec 2024-11-11T12:42:13,058 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=43, table=TestAcidGuarantees in 2.4920 sec 2024-11-11T12:42:13,883 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:42:13,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:13,901 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping 
pipeline suffix; before=1, new segment=null 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:13,902 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:13,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111210c6af68b6e4001964469bdcda8e8b5_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:13,943 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:13,943 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328993939, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:13,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328993940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:13,946 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:13,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328993943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:13,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:13,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328993943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:13,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741976_1152 (size=14594) 2024-11-11T12:42:13,971 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:13,979 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111210c6af68b6e4001964469bdcda8e8b5_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111210c6af68b6e4001964469bdcda8e8b5_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:13,981 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/60808cfbdfe944578f4d7e45adfc7cce, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:13,982 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/60808cfbdfe944578f4d7e45adfc7cce is 175, key is test_row_0/A:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is 
added to blk_1073741977_1153 (size=39549) 2024-11-11T12:42:14,007 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=54, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/60808cfbdfe944578f4d7e45adfc7cce 2024-11-11T12:42:14,018 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a9bea331eb174aca81d527817ca1a850 is 50, key is test_row_0/B:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741978_1154 (size=12001) 2024-11-11T12:42:14,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a9bea331eb174aca81d527817ca1a850 2024-11-11T12:42:14,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7950aff868534949b29b2225b62ed5d9 is 50, key is test_row_0/C:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328994045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741979_1155 (size=12001) 2024-11-11T12:42:14,057 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=54 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7950aff868534949b29b2225b62ed5d9 2024-11-11T12:42:14,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328994057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,059 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,059 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328994057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328994057, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,067 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/60808cfbdfe944578f4d7e45adfc7cce as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce 2024-11-11T12:42:14,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce, entries=200, sequenceid=54, filesize=38.6 K 2024-11-11T12:42:14,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a9bea331eb174aca81d527817ca1a850 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850 2024-11-11T12:42:14,122 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850, entries=150, sequenceid=54, filesize=11.7 K 2024-11-11T12:42:14,123 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7950aff868534949b29b2225b62ed5d9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9 2024-11-11T12:42:14,129 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9, entries=150, sequenceid=54, filesize=11.7 K 2024-11-11T12:42:14,130 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for fe7e7af7c234f1775e0b775751ee14f9 in 229ms, sequenceid=54, compaction requested=true 2024-11-11T12:42:14,130 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:14,130 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:14,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:14,131 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:14,131 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:14,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:14,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:14,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:14,132 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:14,132 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92865 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:14,132 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:14,135 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:14,135 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:14,135 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
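The repeated RegionTooBusyException entries recorded above are the region server's write back-pressure path: HRegion.checkResources rejects a put once the region's memstore exceeds its blocking size, which is the configured flush size multiplied by the block multiplier. Below is a minimal configuration sketch, assuming the default multiplier of 4 and a deliberately small 128 K flush size, which together would give the "Over memstore limit=512.0 K" figure seen here; the actual values used by this test run are not shown in this excerpt and are an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch: values chosen so that flush size (128 K) times the
// block multiplier (4) equals the 512 K blocking limit reported in this log.
// Both numbers are assumptions for illustration, not read from this excerpt.
public class MemstoreBackpressureSketch {
  public static Configuration smallMemstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);  // flush threshold: 128 K
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // block writes at 4 x 128 K = 512 K
    return conf;
  }
}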
2024-11-11T12:42:14,135 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=32.9 K 2024-11-11T12:42:14,136 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b2c067520cc4a389c792afcf6b4a301, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731328930593 2024-11-11T12:42:14,136 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:14,136 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=90.7 K 2024-11-11T12:42:14,136 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:14,136 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce] 2024-11-11T12:42:14,137 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c4e04d4a48754b08bc40d4acc3a433c6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731328930629 2024-11-11T12:42:14,138 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b5a9b9c4269442ad804c09cccc1ff2dd, keycount=100, bloomtype=ROW, size=21.8 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731328930593 2024-11-11T12:42:14,138 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a9bea331eb174aca81d527817ca1a850, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931773 2024-11-11T12:42:14,138 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 89aef9a17c174728ae4fff7ea3a45654, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731328930629 2024-11-11T12:42:14,139 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 60808cfbdfe944578f4d7e45adfc7cce, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931762 2024-11-11T12:42:14,157 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:14,160 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#136 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:14,161 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/bcdff7e0fcf647f897f1d47b9505269d is 50, key is test_row_0/B:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,163 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111dbc5fc1e91db4d8fa3b5638a56d7e897_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:14,170 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111dbc5fc1e91db4d8fa3b5638a56d7e897_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:14,170 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111dbc5fc1e91db4d8fa3b5638a56d7e897_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:14,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741980_1156 (size=12104) 2024-11-11T12:42:14,222 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/bcdff7e0fcf647f897f1d47b9505269d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/bcdff7e0fcf647f897f1d47b9505269d 2024-11-11T12:42:14,231 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into bcdff7e0fcf647f897f1d47b9505269d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
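The DefaultMobStoreCompactor entries above show a MOB writer being opened for family A and then aborted because the compaction produced no MOB cells; that code path is only taken when the column family is declared MOB-enabled. A hedged sketch of such a declaration follows; the threshold value is an assumption for illustration and is not taken from this log.

import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch: a MOB-enabled family like the "A" family compacted above.
// Cells larger than the threshold are written to MOB files; smaller cells stay in
// ordinary HFiles, which is why the MOB writer above ends up with zero cells and
// is aborted.
public class MobFamilySketch {
  public static ColumnFamilyDescriptor mobEnabledFamilyA() {
    return ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("A"))
        .setMobEnabled(true)
        .setMobThreshold(100L * 1024)  // 100 K threshold -- assumed value
        .build();
  }
}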
2024-11-11T12:42:14,231 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:14,231 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=13, startTime=1731328934131; duration=0sec 2024-11-11T12:42:14,232 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:14,232 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:14,232 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:14,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741981_1157 (size=4469) 2024-11-11T12:42:14,233 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:14,234 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:14,234 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
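The SortedCompactionPolicy lines above ("3 store files, 0 compacting, 3 eligible, 16 blocking") reflect the standard store-file thresholds: a minor compaction is considered once the minimum file count is reached, and flushes are delayed once a store reaches the blocking file count. The sketch below lists the relevant keys with their usual defaults; these defaults are assumptions about this run, not values read from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Hypothetical sketch of the store-file thresholds behind the
// "3 eligible, 16 blocking" selection messages above (common defaults shown).
public class CompactionThresholdSketch {
  public static Configuration defaults() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);       // minimum files before a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);      // maximum files per compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);  // flushes are delayed past this many files
    return conf;
  }
}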
2024-11-11T12:42:14,234 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=32.9 K 2024-11-11T12:42:14,235 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1dd50a8f5b89415298edce14eb09c231, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=17, earliestPutTs=1731328930593 2024-11-11T12:42:14,236 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fda44a3cde0744029d7df9ae245229d0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731328930629 2024-11-11T12:42:14,236 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7950aff868534949b29b2225b62ed5d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931773 2024-11-11T12:42:14,249 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:42:14,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:14,249 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:14,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:14,250 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:14,262 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#137 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:14,263 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/77dfce55f7394e35b8be9f9ec3e6c86b is 50, key is test_row_0/C:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741982_1158 (size=12104) 2024-11-11T12:42:14,283 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411111cd423ef189247cfa33ae262d3357b06_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328933941/Put/seqid=0 2024-11-11T12:42:14,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741983_1159 (size=14594) 2024-11-11T12:42:14,295 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:14,300 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411111cd423ef189247cfa33ae262d3357b06_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411111cd423ef189247cfa33ae262d3357b06_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:14,301 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/0b144c86b2b04ffab0e7ea384f6d1bdb, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:14,302 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/0b144c86b2b04ffab0e7ea384f6d1bdb is 175, key is test_row_0/A:col10/1731328933941/Put/seqid=0 2024-11-11T12:42:14,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741984_1160 (size=39549) 2024-11-11T12:42:14,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328994323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,324 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328994323, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,325 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328994324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,326 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328994324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,426 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328994426, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,427 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328994427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,428 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328994427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,432 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328994431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,631 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328994629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,635 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328994635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,636 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#135 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:14,638 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2c80448f4bad4e6a855f8624024ae2ea is 175, key is test_row_0/A:col10/1731328933900/Put/seqid=0 2024-11-11T12:42:14,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328994636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,641 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328994636, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741985_1161 (size=31058) 2024-11-11T12:42:14,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=43 2024-11-11T12:42:14,677 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 43 completed 2024-11-11T12:42:14,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:14,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees 2024-11-11T12:42:14,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:14,697 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:14,698 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/77dfce55f7394e35b8be9f9ec3e6c86b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/77dfce55f7394e35b8be9f9ec3e6c86b 2024-11-11T12:42:14,704 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=45, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:14,704 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:14,709 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=79, memsize=47.0 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/0b144c86b2b04ffab0e7ea384f6d1bdb 2024-11-11T12:42:14,755 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into 77dfce55f7394e35b8be9f9ec3e6c86b(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:14,755 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:14,755 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=13, startTime=1731328934132; duration=0sec 2024-11-11T12:42:14,756 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:14,756 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:14,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/166b9b6995b54d138493bec0a6fca487 is 50, key is test_row_0/B:col10/1731328933941/Put/seqid=0 2024-11-11T12:42:14,785 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:14,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328994784, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,787 DEBUG [Thread-705 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4153 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:14,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:14,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741986_1162 (size=12001) 2024-11-11T12:42:14,798 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/166b9b6995b54d138493bec0a6fca487 2024-11-11T12:42:14,816 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/92d4c630cafc4f91aadd852963e842e5 is 50, key is test_row_0/C:col10/1731328933941/Put/seqid=0 2024-11-11T12:42:14,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741987_1163 (size=12001) 2024-11-11T12:42:14,856 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/92d4c630cafc4f91aadd852963e842e5 2024-11-11T12:42:14,860 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:14,861 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-11T12:42:14,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:14,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:14,861 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:14,861 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:14,862 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:14,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:14,870 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/0b144c86b2b04ffab0e7ea384f6d1bdb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb 2024-11-11T12:42:14,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb, entries=200, sequenceid=79, filesize=38.6 K 2024-11-11T12:42:14,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/166b9b6995b54d138493bec0a6fca487 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487 2024-11-11T12:42:14,899 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487, entries=150, sequenceid=79, filesize=11.7 K 2024-11-11T12:42:14,901 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/92d4c630cafc4f91aadd852963e842e5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5 2024-11-11T12:42:14,906 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5, entries=150, sequenceid=79, filesize=11.7 K 2024-11-11T12:42:14,907 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=67.09 KB/68700 for fe7e7af7c234f1775e0b775751ee14f9 in 658ms, sequenceid=79, compaction requested=false 2024-11-11T12:42:14,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:14,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:14,941 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:14,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 
2024-11-11T12:42:14,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:14,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:14,989 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ab704bfade144584a1335ab8775150c4_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:14,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:15,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741988_1164 (size=12154) 2024-11-11T12:42:15,007 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:15,013 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ab704bfade144584a1335ab8775150c4_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ab704bfade144584a1335ab8775150c4_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:15,014 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/a09f58fcf62c4059b103cadc38cba57a, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,015 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/a09f58fcf62c4059b103cadc38cba57a is 175, key is test_row_0/A:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,022 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,023 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-11T12:42:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:15,023 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:15,023 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:15,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:15,040 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328995033, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328995034, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,041 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328995035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328995038, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741989_1165 (size=30955) 2024-11-11T12:42:15,049 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=95, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/a09f58fcf62c4059b103cadc38cba57a 2024-11-11T12:42:15,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/765c8385ea7b4c0b854998f3e0b4f349 is 50, key is test_row_0/B:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,078 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2c80448f4bad4e6a855f8624024ae2ea as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea 2024-11-11T12:42:15,087 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into 2c80448f4bad4e6a855f8624024ae2ea(size=30.3 K), total size for store is 69.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:15,087 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:15,087 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=13, startTime=1731328934130; duration=0sec 2024-11-11T12:42:15,087 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:15,087 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:15,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741990_1166 (size=12001) 2024-11-11T12:42:15,106 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/765c8385ea7b4c0b854998f3e0b4f349 2024-11-11T12:42:15,138 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3303e534e1c445468ccc336c93e7e673 is 50, key is test_row_0/C:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,149 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328995148, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,150 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328995149, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,156 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328995152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,157 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328995152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741991_1167 (size=12001) 2024-11-11T12:42:15,179 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=95 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3303e534e1c445468ccc336c93e7e673 2024-11-11T12:42:15,188 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-11T12:42:15,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:15,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:15,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:15,189 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] handler.RSProcedureHandler(58): pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:15,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=46 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:15,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=46 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:15,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/a09f58fcf62c4059b103cadc38cba57a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a 2024-11-11T12:42:15,228 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a, entries=150, sequenceid=95, filesize=30.2 K 2024-11-11T12:42:15,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/765c8385ea7b4c0b854998f3e0b4f349 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349 2024-11-11T12:42:15,240 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349, entries=150, sequenceid=95, filesize=11.7 K 2024-11-11T12:42:15,242 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3303e534e1c445468ccc336c93e7e673 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673 2024-11-11T12:42:15,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673, entries=150, sequenceid=95, filesize=11.7 K 2024-11-11T12:42:15,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for fe7e7af7c234f1775e0b775751ee14f9 in 311ms, sequenceid=95, compaction requested=true 2024-11-11T12:42:15,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:15,253 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:15,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:15,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:15,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:42:15,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:15,253 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-11T12:42:15,253 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:15,253 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:15,257 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:15,257 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:15,257 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:15,258 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/77dfce55f7394e35b8be9f9ec3e6c86b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=35.3 K 2024-11-11T12:42:15,258 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 77dfce55f7394e35b8be9f9ec3e6c86b, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931773 2024-11-11T12:42:15,259 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 92d4c630cafc4f91aadd852963e842e5, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731328933941 2024-11-11T12:42:15,259 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3303e534e1c445468ccc336c93e7e673, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:15,263 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101562 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:15,263 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:15,263 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:15,263 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=99.2 K 2024-11-11T12:42:15,264 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:15,264 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a] 2024-11-11T12:42:15,264 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c80448f4bad4e6a855f8624024ae2ea, keycount=150, bloomtype=ROW, size=30.3 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931773 2024-11-11T12:42:15,265 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 0b144c86b2b04ffab0e7ea384f6d1bdb, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731328933938 2024-11-11T12:42:15,265 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a09f58fcf62c4059b103cadc38cba57a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:15,276 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#144 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:15,276 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/5fd8e06e46634be0b0bd1656640cdb9e is 50, key is test_row_0/C:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,282 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:15,298 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111d753496e67804d8882b9cdd88df95482_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,304 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111d753496e67804d8882b9cdd88df95482_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,305 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d753496e67804d8882b9cdd88df95482_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741992_1168 (size=12207) 2024-11-11T12:42:15,351 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/5fd8e06e46634be0b0bd1656640cdb9e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/5fd8e06e46634be0b0bd1656640cdb9e 2024-11-11T12:42:15,354 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,355 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=46 2024-11-11T12:42:15,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:15,355 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=120.76 KB heapSize=317.16 KB 2024-11-11T12:42:15,358 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into 5fd8e06e46634be0b0bd1656640cdb9e(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:15,358 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:15,358 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=13, startTime=1731328935253; duration=0sec 2024-11-11T12:42:15,358 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:15,359 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:15,359 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:15,361 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:15,362 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:15,362 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:15,362 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/bcdff7e0fcf647f897f1d47b9505269d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=35.3 K 2024-11-11T12:42:15,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:15,363 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:15,363 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:15,364 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bcdff7e0fcf647f897f1d47b9505269d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=54, earliestPutTs=1731328931773 2024-11-11T12:42:15,364 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 166b9b6995b54d138493bec0a6fca487, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731328933941 2024-11-11T12:42:15,365 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 765c8385ea7b4c0b854998f3e0b4f349, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:15,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741993_1169 (size=4469) 2024-11-11T12:42:15,383 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 
{event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110ccabba8b64a460c85ec32c97d72aae0_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328935352/Put/seqid=0 2024-11-11T12:42:15,389 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#145 average throughput is 0.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:15,390 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/9441e002d3ec4c8086ea3f382595a385 is 175, key is test_row_0/A:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,402 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328995398, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,405 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328995401, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,406 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328995402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,407 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328995402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,409 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#147 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:15,409 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/d494ef56cfdb4df2bdb02ef5c74ad3e9 is 50, key is test_row_0/B:col10/1731328934934/Put/seqid=0 2024-11-11T12:42:15,414 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741994_1170 (size=14594) 2024-11-11T12:42:15,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741995_1171 (size=31161) 2024-11-11T12:42:15,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741996_1172 (size=12207) 2024-11-11T12:42:15,430 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/9441e002d3ec4c8086ea3f382595a385 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385 2024-11-11T12:42:15,463 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/d494ef56cfdb4df2bdb02ef5c74ad3e9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/d494ef56cfdb4df2bdb02ef5c74ad3e9 2024-11-11T12:42:15,470 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into 9441e002d3ec4c8086ea3f382595a385(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:15,470 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:15,470 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=13, startTime=1731328935252; duration=0sec 2024-11-11T12:42:15,470 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:15,470 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:15,475 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into d494ef56cfdb4df2bdb02ef5c74ad3e9(size=11.9 K), total size for store is 11.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:15,476 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:15,476 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=13, startTime=1731328935253; duration=0sec 2024-11-11T12:42:15,476 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:15,476 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:15,504 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328995504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,509 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328995506, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,509 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,510 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328995507, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,521 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328995520, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,707 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328995706, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328995713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328995713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:15,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328995723, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:15,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:15,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:15,822 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110ccabba8b64a460c85ec32c97d72aae0_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110ccabba8b64a460c85ec32c97d72aae0_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:15,823 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/85213c7265a746299e1d7a86cc85bd65, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:15,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/85213c7265a746299e1d7a86cc85bd65 is 175, key is test_row_0/A:col10/1731328935352/Put/seqid=0 2024-11-11T12:42:15,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741997_1173 (size=39549) 2024-11-11T12:42:16,010 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328996009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,018 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328996017, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,028 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328996026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,029 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328996027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,232 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=119, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/85213c7265a746299e1d7a86cc85bd65 2024-11-11T12:42:16,254 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a2318b042e7f4f39a1e067bc8331d894 is 50, key is test_row_0/B:col10/1731328935352/Put/seqid=0 2024-11-11T12:42:16,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741998_1174 (size=12001) 2024-11-11T12:42:16,518 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,518 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328996516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,529 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328996528, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328996534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,535 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:16,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328996535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:16,694 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a2318b042e7f4f39a1e067bc8331d894 2024-11-11T12:42:16,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4eb272fbc7204185959c5dc2dc912e3b is 50, key is test_row_0/C:col10/1731328935352/Put/seqid=0 2024-11-11T12:42:16,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741999_1175 (size=12001) 2024-11-11T12:42:16,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:17,149 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=119 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4eb272fbc7204185959c5dc2dc912e3b 2024-11-11T12:42:17,171 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/85213c7265a746299e1d7a86cc85bd65 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65 2024-11-11T12:42:17,182 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65, entries=200, sequenceid=119, filesize=38.6 K 2024-11-11T12:42:17,183 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a2318b042e7f4f39a1e067bc8331d894 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894 2024-11-11T12:42:17,190 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894, entries=150, sequenceid=119, filesize=11.7 K 2024-11-11T12:42:17,191 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4eb272fbc7204185959c5dc2dc912e3b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b 2024-11-11T12:42:17,201 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b, entries=150, sequenceid=119, filesize=11.7 K 2024-11-11T12:42:17,203 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=80.51 KB/82440 for fe7e7af7c234f1775e0b775751ee14f9 in 1848ms, sequenceid=119, compaction requested=false 2024-11-11T12:42:17,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:17,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:17,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=46}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=46 2024-11-11T12:42:17,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=46 2024-11-11T12:42:17,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=46, resume processing ppid=45 2024-11-11T12:42:17,206 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, ppid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5000 sec 2024-11-11T12:42:17,207 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=45, table=TestAcidGuarantees in 2.5230 sec 2024-11-11T12:42:17,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:17,526 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=87.22 KB heapSize=229.27 KB 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:17,527 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:17,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111165a16b3ad2554398af2d341fa6d92ba3_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:17,574 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328997557, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,581 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328997574, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328997575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,582 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328997575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742000_1176 (size=12304) 2024-11-11T12:42:17,687 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328997683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,688 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328997684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328997684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328997685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,891 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,892 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328997891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328997889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,900 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328997895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,908 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:17,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328997892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:17,990 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:17,996 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111165a16b3ad2554398af2d341fa6d92ba3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111165a16b3ad2554398af2d341fa6d92ba3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:18,005 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2d08e551310640d580d64521904a852e, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2d08e551310640d580d64521904a852e is 175, key is test_row_0/A:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:18,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742001_1177 (size=31105) 2024-11-11T12:42:18,195 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328998194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,198 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328998197, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,206 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328998206, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,215 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328998211, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,421 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=137, memsize=29.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2d08e551310640d580d64521904a852e 2024-11-11T12:42:18,430 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/283e2f8e02214eaa899b1711f6b593f8 is 50, key is test_row_0/B:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:18,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742002_1178 (size=12151) 2024-11-11T12:42:18,451 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/283e2f8e02214eaa899b1711f6b593f8 2024-11-11T12:42:18,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/6bcb7f9e74f24ca3819a3ce212032f4f is 50, key is test_row_0/C:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:18,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742003_1179 (size=12151) 2024-11-11T12:42:18,530 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=137 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/6bcb7f9e74f24ca3819a3ce212032f4f 2024-11-11T12:42:18,538 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2d08e551310640d580d64521904a852e as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e 2024-11-11T12:42:18,554 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e, entries=150, sequenceid=137, filesize=30.4 K 2024-11-11T12:42:18,555 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/283e2f8e02214eaa899b1711f6b593f8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8 2024-11-11T12:42:18,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8, entries=150, sequenceid=137, filesize=11.9 K 2024-11-11T12:42:18,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/6bcb7f9e74f24ca3819a3ce212032f4f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f 2024-11-11T12:42:18,573 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f, entries=150, sequenceid=137, filesize=11.9 K 2024-11-11T12:42:18,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for fe7e7af7c234f1775e0b775751ee14f9 in 1049ms, sequenceid=137, compaction requested=true 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:18,576 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:18,576 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction 
store size is 2 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:18,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:18,577 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 101815 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:18,578 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:18,578 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:18,578 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:18,578 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:18,578 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:18,578 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/d494ef56cfdb4df2bdb02ef5c74ad3e9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=35.5 K 2024-11-11T12:42:18,578 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=99.4 K 2024-11-11T12:42:18,578 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:18,578 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e] 2024-11-11T12:42:18,578 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d494ef56cfdb4df2bdb02ef5c74ad3e9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:18,579 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9441e002d3ec4c8086ea3f382595a385, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:18,579 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 85213c7265a746299e1d7a86cc85bd65, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731328934971 2024-11-11T12:42:18,579 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a2318b042e7f4f39a1e067bc8331d894, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731328935034 2024-11-11T12:42:18,579 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d08e551310640d580d64521904a852e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:18,579 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 283e2f8e02214eaa899b1711f6b593f8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:18,603 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,613 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111d51b628ab5044f7ab4c0c9d9490755d7_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,613 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#154 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:18,614 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/3ae9b59b11c640458fbac5bef7121d57 is 50, key is test_row_0/B:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:18,615 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111d51b628ab5044f7ab4c0c9d9490755d7_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,615 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d51b628ab5044f7ab4c0c9d9490755d7_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742005_1181 (size=4469) 2024-11-11T12:42:18,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742004_1180 (size=12459) 2024-11-11T12:42:18,666 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/3ae9b59b11c640458fbac5bef7121d57 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3ae9b59b11c640458fbac5bef7121d57 2024-11-11T12:42:18,677 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into 3ae9b59b11c640458fbac5bef7121d57(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:18,677 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:18,677 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=13, startTime=1731328938576; duration=0sec 2024-11-11T12:42:18,678 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:18,679 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:18,679 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:18,683 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:18,683 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:18,683 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:18,684 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/5fd8e06e46634be0b0bd1656640cdb9e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=35.5 K 2024-11-11T12:42:18,685 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 5fd8e06e46634be0b0bd1656640cdb9e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=95, earliestPutTs=1731328934322 2024-11-11T12:42:18,685 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4eb272fbc7204185959c5dc2dc912e3b, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=119, earliestPutTs=1731328935034 2024-11-11T12:42:18,686 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6bcb7f9e74f24ca3819a3ce212032f4f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:18,694 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
fe7e7af7c234f1775e0b775751ee14f9#C#compaction#155 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:18,695 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/a063a0262e474bc78688ce0ee4f2a47b is 50, key is test_row_0/C:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:18,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:18,701 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:18,704 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,724 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,724 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328998715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328998717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,725 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328998724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328998728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742006_1182 (size=12459) 2024-11-11T12:42:18,759 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c7d0554a867243948bdc921b42e7a96f_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328938700/Put/seqid=0 2024-11-11T12:42:18,760 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/a063a0262e474bc78688ce0ee4f2a47b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/a063a0262e474bc78688ce0ee4f2a47b 2024-11-11T12:42:18,768 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into a063a0262e474bc78688ce0ee4f2a47b(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
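The RegionTooBusyException entries above and below come from HRegion.checkResources rejecting writes once the region's memstore passes its blocking limit (reported here as 512.0 K); in a stock configuration that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and the small figure suggests this test deliberately runs with a tiny flush size. As a rough illustration of the client side that keeps hitting this path, a minimal sketch follows (table, row, and column-family names are taken from the log; the retry settings are illustrative assumptions, not the test's actual values):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Client retry knobs consulted by RpcRetryingCallerImpl: how many attempts
        // to make and the base pause (ms) used for backoff between them.
        conf.setInt("hbase.client.retries.number", 16);  // illustrative value
        conf.setLong("hbase.client.pause", 100);         // illustrative value
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_1"));
          // One cell per column family, mirroring the A/B/C families in the log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          put.addColumn(Bytes.toBytes("B"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          put.addColumn(Bytes.toBytes("C"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
          // When the server answers with RegionTooBusyException, the client retries
          // internally with backoff and only rethrows after the retry budget is spent.
          table.put(put);
        }
      }
    }

The "tries=7, retries=16" record further down shows exactly this client-side retry loop at work on one of the test's writer threads.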
2024-11-11T12:42:18,768 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:18,768 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=13, startTime=1731328938576; duration=0sec 2024-11-11T12:42:18,768 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:18,768 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:18,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742007_1183 (size=14794) 2024-11-11T12:42:18,787 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:18,792 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c7d0554a867243948bdc921b42e7a96f_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c7d0554a867243948bdc921b42e7a96f_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:18,794 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f6082ea43ab1493594b63d6153dd6276, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:18,794 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f6082ea43ab1493594b63d6153dd6276 is 175, key is test_row_0/A:col10/1731328938700/Put/seqid=0 2024-11-11T12:42:18,797 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731328998795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,798 DEBUG [Thread-705 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8163 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:18,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=45 2024-11-11T12:42:18,803 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 45 completed 2024-11-11T12:42:18,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:18,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=47, 
state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees 2024-11-11T12:42:18,806 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:18,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:18,807 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=47, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:18,807 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:18,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742008_1184 (size=39749) 2024-11-11T12:42:18,815 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=161, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f6082ea43ab1493594b63d6153dd6276 2024-11-11T12:42:18,825 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 is 50, key is test_row_0/B:col10/1731328938700/Put/seqid=0 2024-11-11T12:42:18,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328998826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328998827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,833 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:18,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328998831, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742009_1185 (size=12151) 2024-11-11T12:42:18,849 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 2024-11-11T12:42:18,861 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ab2767badf54475398556da5efba2e67 is 50, key is test_row_0/C:col10/1731328938700/Put/seqid=0 2024-11-11T12:42:18,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742010_1186 (size=12151) 2024-11-11T12:42:18,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ab2767badf54475398556da5efba2e67 2024-11-11T12:42:18,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:18,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f6082ea43ab1493594b63d6153dd6276 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276 2024-11-11T12:42:18,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276, entries=200, sequenceid=161, filesize=38.8 K 2024-11-11T12:42:18,917 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 2024-11-11T12:42:18,927 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874, entries=150, sequenceid=161, filesize=11.9 K 2024-11-11T12:42:18,930 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ab2767badf54475398556da5efba2e67 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67 2024-11-11T12:42:18,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67, entries=150, sequenceid=161, filesize=11.9 K 2024-11-11T12:42:18,938 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fe7e7af7c234f1775e0b775751ee14f9 in 237ms, sequenceid=161, compaction requested=false 2024-11-11T12:42:18,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:18,959 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:18,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=48 2024-11-11T12:42:18,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
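The procedure entries above trace the master-driven flush path: the HMaster stores a FlushTableProcedure (pid=47), which fans out a FlushRegionProcedure (pid=48) that is dispatched to the region server now starting the region operation. From a client, this flow is triggered through the Admin API; a minimal sketch, assuming a reachable cluster and default client configuration, might be:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the master to flush every region of the table; presumably the kind of
          // call behind the "Client=jenkins//... flush TestAcidGuarantees" entries above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }
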
2024-11-11T12:42:18,960 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:18,961 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:18,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119e7678264a764d5281dd796e2f4b5ea3_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328938723/Put/seqid=0 2024-11-11T12:42:18,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742011_1187 (size=12304) 2024-11-11T12:42:19,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:19,036 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:19,057 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#153 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:19,058 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/55449cbfecfb45cfb79357ccc1115a83 is 175, key is test_row_0/A:col10/1731328935396/Put/seqid=0 2024-11-11T12:42:19,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742012_1188 (size=31413) 2024-11-11T12:42:19,069 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328999068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328999069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328999070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:19,171 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328999171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328999173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,175 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328999173, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328999374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,378 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,378 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328999377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328999377, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,394 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:19,399 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119e7678264a764d5281dd796e2f4b5ea3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119e7678264a764d5281dd796e2f4b5ea3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:19,400 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5c09b5133e534bb4be858353e11ba0ff, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:19,401 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5c09b5133e534bb4be858353e11ba0ff is 175, key is test_row_0/A:col10/1731328938723/Put/seqid=0 2024-11-11T12:42:19,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742013_1189 (size=31105) 2024-11-11T12:42:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:19,470 DEBUG 
[RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/55449cbfecfb45cfb79357ccc1115a83 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83 2024-11-11T12:42:19,481 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into 55449cbfecfb45cfb79357ccc1115a83(size=30.7 K), total size for store is 69.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:19,481 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:19,481 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=13, startTime=1731328938576; duration=0sec 2024-11-11T12:42:19,481 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:19,481 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:19,679 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731328999678, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731328999680, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,682 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731328999681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,730 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731328999729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:19,806 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=175, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5c09b5133e534bb4be858353e11ba0ff 2024-11-11T12:42:19,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/ee63ce79a0b94671a7bb11f0e6e27b51 is 50, key is test_row_0/B:col10/1731328938723/Put/seqid=0 2024-11-11T12:42:19,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742014_1190 (size=12151) 2024-11-11T12:42:19,824 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/ee63ce79a0b94671a7bb11f0e6e27b51 2024-11-11T12:42:19,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/0adffd5fff784cb487b5d658071d36fd is 50, key is test_row_0/C:col10/1731328938723/Put/seqid=0 2024-11-11T12:42:19,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742015_1191 (size=12151) 2024-11-11T12:42:19,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:20,182 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:20,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329000180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:20,187 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:20,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329000186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:20,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:20,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329000186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:20,238 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=175 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/0adffd5fff784cb487b5d658071d36fd 2024-11-11T12:42:20,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5c09b5133e534bb4be858353e11ba0ff as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff 2024-11-11T12:42:20,248 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff, entries=150, sequenceid=175, filesize=30.4 K 2024-11-11T12:42:20,250 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/ee63ce79a0b94671a7bb11f0e6e27b51 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51 2024-11-11T12:42:20,255 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51, entries=150, sequenceid=175, filesize=11.9 K 2024-11-11T12:42:20,257 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/0adffd5fff784cb487b5d658071d36fd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd 2024-11-11T12:42:20,261 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd, entries=150, sequenceid=175, filesize=11.9 K 2024-11-11T12:42:20,264 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fe7e7af7c234f1775e0b775751ee14f9 in 1304ms, sequenceid=175, compaction requested=true 2024-11-11T12:42:20,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:20,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:20,264 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-11-11T12:42:20,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-11-11T12:42:20,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=48, resume processing ppid=47 2024-11-11T12:42:20,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4620 sec 2024-11-11T12:42:20,275 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=47, table=TestAcidGuarantees in 1.4680 sec 2024-11-11T12:42:20,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=47 2024-11-11T12:42:20,911 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 47 completed 2024-11-11T12:42:20,914 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:20,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees 2024-11-11T12:42:20,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:20,916 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:20,917 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=49, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:20,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:21,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:21,071 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,073 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=50 2024-11-11T12:42:21,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:21,075 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:21,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:21,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:21,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111047424db6050469998085695f1c64cd3_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328939067/Put/seqid=0 2024-11-11T12:42:21,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742016_1192 
(size=12304) 2024-11-11T12:42:21,192 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:21,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:21,214 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329001212, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329001213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,215 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329001213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:21,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329001317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,319 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329001317, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329001328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,519 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,520 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329001519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,523 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329001521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,528 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:21,533 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329001531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:21,554 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111047424db6050469998085695f1c64cd3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111047424db6050469998085695f1c64cd3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:21,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/ac11a70564de4e598c7b5bbbf0b89c1d, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:21,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/ac11a70564de4e598c7b5bbbf0b89c1d is 175, key is test_row_0/A:col10/1731328939067/Put/seqid=0 2024-11-11T12:42:21,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742017_1193 (size=31105) 2024-11-11T12:42:21,568 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=199, memsize=44.7 K, 
hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/ac11a70564de4e598c7b5bbbf0b89c1d 2024-11-11T12:42:21,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/4d96e830bec447378381f91837dd2b11 is 50, key is test_row_0/B:col10/1731328939067/Put/seqid=0 2024-11-11T12:42:21,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742018_1194 (size=12151) 2024-11-11T12:42:21,592 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/4d96e830bec447378381f91837dd2b11 2024-11-11T12:42:21,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ac2a38fbd7d447cda4b892cd813b4dcb is 50, key is test_row_0/C:col10/1731328939067/Put/seqid=0 2024-11-11T12:42:21,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742019_1195 (size=12151) 2024-11-11T12:42:21,744 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329001741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,745 DEBUG [Thread-697 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:21,822 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329001821, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,828 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329001825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,836 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:21,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329001835, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:21,967 INFO [master/32e78532c8b1:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-11T12:42:21,967 INFO [master/32e78532c8b1:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-11T12:42:22,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:22,053 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=199 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ac2a38fbd7d447cda4b892cd813b4dcb 2024-11-11T12:42:22,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/ac11a70564de4e598c7b5bbbf0b89c1d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d 2024-11-11T12:42:22,075 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d, entries=150, sequenceid=199, filesize=30.4 K 2024-11-11T12:42:22,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/4d96e830bec447378381f91837dd2b11 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11 2024-11-11T12:42:22,083 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11, entries=150, sequenceid=199, filesize=11.9 K 2024-11-11T12:42:22,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/ac2a38fbd7d447cda4b892cd813b4dcb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb 2024-11-11T12:42:22,096 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb, entries=150, sequenceid=199, filesize=11.9 K 2024-11-11T12:42:22,108 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] 
regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fe7e7af7c234f1775e0b775751ee14f9 in 1035ms, sequenceid=199, compaction requested=true 2024-11-11T12:42:22,108 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:22,109 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=50}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=50 2024-11-11T12:42:22,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=50 2024-11-11T12:42:22,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-11-11T12:42:22,114 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1960 sec 2024-11-11T12:42:22,117 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=49, table=TestAcidGuarantees in 1.2010 sec 2024-11-11T12:42:22,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:22,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:22,331 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:22,341 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e92a64019b514c8d8b649dc18d81ab30_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:22,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742020_1196 (size=12304) 2024-11-11T12:42:22,373 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:22,382 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e92a64019b514c8d8b649dc18d81ab30_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e92a64019b514c8d8b649dc18d81ab30_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:22,383 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/bd90ae292a9f41ea9729abd3c3d3769e, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:22,384 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/bd90ae292a9f41ea9729abd3c3d3769e is 175, key is test_row_0/A:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:22,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329002402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329002403, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,409 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329002404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,413 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742021_1197 (size=31105) 2024-11-11T12:42:22,511 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,511 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329002510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329002509, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,512 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329002510, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329002714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,717 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329002716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,717 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329002715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:22,814 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=213, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/bd90ae292a9f41ea9729abd3c3d3769e 2024-11-11T12:42:22,850 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/7be270f6aa074fce9a2a4ad6607a1a2d is 50, key is test_row_0/B:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:22,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742022_1198 (size=12151) 2024-11-11T12:42:23,019 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329003018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329003018, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,020 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329003019, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-11-11T12:42:23,037 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 49 completed 2024-11-11T12:42:23,041 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:23,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees 2024-11-11T12:42:23,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:23,043 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:23,043 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=51, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:23,044 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=52, ppid=51, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:23,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:23,196 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,198 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-11T12:42:23,199 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:23,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:23,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:23,200 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] handler.RSProcedureHandler(58): pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:23,200 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=52 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:23,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=52 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:23,292 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/7be270f6aa074fce9a2a4ad6607a1a2d 2024-11-11T12:42:23,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/362afab335fa4846804cd20430e1271b is 50, key is test_row_0/C:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:23,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742023_1199 (size=12151) 2024-11-11T12:42:23,321 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=213 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/362afab335fa4846804cd20430e1271b 2024-11-11T12:42:23,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/bd90ae292a9f41ea9729abd3c3d3769e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e 2024-11-11T12:42:23,334 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e, entries=150, sequenceid=213, filesize=30.4 K 2024-11-11T12:42:23,338 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/7be270f6aa074fce9a2a4ad6607a1a2d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d 2024-11-11T12:42:23,344 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d, entries=150, sequenceid=213, filesize=11.9 K 2024-11-11T12:42:23,346 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/362afab335fa4846804cd20430e1271b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b 2024-11-11T12:42:23,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b, entries=150, sequenceid=213, filesize=11.9 K 2024-11-11T12:42:23,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for fe7e7af7c234f1775e0b775751ee14f9 in 1023ms, sequenceid=213, compaction requested=true 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:23,354 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:23,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:42:23,356 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-11T12:42:23,356 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,356 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 164477 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-11T12:42:23,356 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:23,357 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:23,357 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=160.6 K 2024-11-11T12:42:23,357 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=11 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:23,357 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e] 2024-11-11T12:42:23,358 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=52 2024-11-11T12:42:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:23,358 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:23,358 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 55449cbfecfb45cfb79357ccc1115a83, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:23,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:23,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:23,359 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:23,359 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f6082ea43ab1493594b63d6153dd6276, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731328937555 2024-11-11T12:42:23,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:23,360 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5c09b5133e534bb4be858353e11ba0ff, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1731328938712 2024-11-11T12:42:23,361 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ac11a70564de4e598c7b5bbbf0b89c1d, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731328939065 2024-11-11T12:42:23,362 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting bd90ae292a9f41ea9729abd3c3d3769e, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:23,362 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61063 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-11T12:42:23,363 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:23,363 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:23,363 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3ae9b59b11c640458fbac5bef7121d57, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=59.6 K 2024-11-11T12:42:23,364 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ae9b59b11c640458fbac5bef7121d57, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:23,365 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a7b5b9fcc1ae4d17bd9ed2e2a6d19874, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731328937570 2024-11-11T12:42:23,366 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ee63ce79a0b94671a7bb11f0e6e27b51, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1731328938712 2024-11-11T12:42:23,367 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4d96e830bec447378381f91837dd2b11, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731328939065 2024-11-11T12:42:23,368 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7be270f6aa074fce9a2a4ad6607a1a2d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:23,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b9b698c18e7940ebbc8250c6c827bf67_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328942356/Put/seqid=0 2024-11-11T12:42:23,392 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:23,413 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111838d0c92e21b4df39349d1d9c04fde2f_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:23,416 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111838d0c92e21b4df39349d1d9c04fde2f_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:23,417 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111838d0c92e21b4df39349d1d9c04fde2f_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:23,429 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#170 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:23,429 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e87e14a8a1f946c580e243ab25436ed1 is 50, key is test_row_0/B:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:23,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742024_1200 (size=12304) 2024-11-11T12:42:23,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:23,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742025_1201 (size=4469) 2024-11-11T12:42:23,471 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#169 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:23,471 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b9b698c18e7940ebbc8250c6c827bf67_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b9b698c18e7940ebbc8250c6c827bf67_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:23,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742026_1202 (size=12629) 2024-11-11T12:42:23,472 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/13705688c9a84ea7aff849e9aba3da8e is 175, key is test_row_0/A:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:23,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/7bc55d68685b40d7b171960d89332e87, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:23,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/7bc55d68685b40d7b171960d89332e87 is 175, key is test_row_0/A:col10/1731328942356/Put/seqid=0 2024-11-11T12:42:23,490 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e87e14a8a1f946c580e243ab25436ed1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e87e14a8a1f946c580e243ab25436ed1 2024-11-11T12:42:23,502 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into e87e14a8a1f946c580e243ab25436ed1(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:23,502 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:23,502 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=11, startTime=1731328943354; duration=0sec 2024-11-11T12:42:23,502 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:23,502 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:23,503 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-11T12:42:23,505 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61063 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-11T12:42:23,505 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:23,505 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:23,505 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/a063a0262e474bc78688ce0ee4f2a47b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=59.6 K 2024-11-11T12:42:23,506 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a063a0262e474bc78688ce0ee4f2a47b, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=137, earliestPutTs=1731328935396 2024-11-11T12:42:23,506 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ab2767badf54475398556da5efba2e67, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1731328937570 2024-11-11T12:42:23,506 DEBUG 
[RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0adffd5fff784cb487b5d658071d36fd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=175, earliestPutTs=1731328938712 2024-11-11T12:42:23,507 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ac2a38fbd7d447cda4b892cd813b4dcb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=199, earliestPutTs=1731328939065 2024-11-11T12:42:23,507 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 362afab335fa4846804cd20430e1271b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:23,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742028_1204 (size=31105) 2024-11-11T12:42:23,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742027_1203 (size=31583) 2024-11-11T12:42:23,525 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:23,526 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#171 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:23,527 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/18c17a8f60d84a02b608056281c88acf is 50, key is test_row_0/C:col10/1731328941210/Put/seqid=0 2024-11-11T12:42:23,528 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:23,532 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/13705688c9a84ea7aff849e9aba3da8e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e 2024-11-11T12:42:23,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742029_1205 (size=12629) 2024-11-11T12:42:23,545 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into 13705688c9a84ea7aff849e9aba3da8e(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:23,545 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:23,545 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=11, startTime=1731328943354; duration=0sec 2024-11-11T12:42:23,545 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:23,545 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:23,547 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/18c17a8f60d84a02b608056281c88acf as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/18c17a8f60d84a02b608056281c88acf 2024-11-11T12:42:23,553 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into 18c17a8f60d84a02b608056281c88acf(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:23,553 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:23,553 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=11, startTime=1731328943354; duration=0sec 2024-11-11T12:42:23,553 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:23,553 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:23,562 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329003559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,565 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329003564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,566 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329003564, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:23,665 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329003665, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,668 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329003667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,669 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329003667, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,869 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329003868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329003870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:23,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329003872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:23,928 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=235, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/7bc55d68685b40d7b171960d89332e87 2024-11-11T12:42:23,963 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e8e3beefa1354622b0cc08bf42e9692c is 50, key is test_row_0/B:col10/1731328942356/Put/seqid=0 2024-11-11T12:42:23,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742030_1206 (size=12151) 2024-11-11T12:42:23,980 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e8e3beefa1354622b0cc08bf42e9692c 2024-11-11T12:42:23,992 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4c152f97c7714a0e934397202d9a24da is 50, key is test_row_0/C:col10/1731328942356/Put/seqid=0 2024-11-11T12:42:24,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742031_1207 (size=12151) 2024-11-11T12:42:24,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:24,173 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329004171, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,178 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329004176, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,185 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329004184, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,420 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=235 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4c152f97c7714a0e934397202d9a24da 2024-11-11T12:42:24,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/7bc55d68685b40d7b171960d89332e87 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87 2024-11-11T12:42:24,441 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87, entries=150, sequenceid=235, filesize=30.4 K 2024-11-11T12:42:24,442 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e8e3beefa1354622b0cc08bf42e9692c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c 2024-11-11T12:42:24,451 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c, entries=150, sequenceid=235, filesize=11.9 K 2024-11-11T12:42:24,452 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/4c152f97c7714a0e934397202d9a24da as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da 2024-11-11T12:42:24,466 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da, entries=150, sequenceid=235, filesize=11.9 K 2024-11-11T12:42:24,474 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for fe7e7af7c234f1775e0b775751ee14f9 in 1115ms, sequenceid=235, compaction requested=false 2024-11-11T12:42:24,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:24,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:24,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-11T12:42:24,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=52 2024-11-11T12:42:24,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=51 2024-11-11T12:42:24,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4340 sec 2024-11-11T12:42:24,482 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=51, table=TestAcidGuarantees in 1.4400 sec 2024-11-11T12:42:24,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:24,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:42:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:24,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:24,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:24,691 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:24,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115b6de3aa6dbc439fbdd94596c397ea85_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:24,728 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329004726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742032_1208 (size=12304) 2024-11-11T12:42:24,735 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,735 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329004728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329004729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,738 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:24,746 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115b6de3aa6dbc439fbdd94596c397ea85_fe7e7af7c234f1775e0b775751ee14f9 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115b6de3aa6dbc439fbdd94596c397ea85_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:24,762 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f974bec1e5d44cb78adbc3052e13c920, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:24,763 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f974bec1e5d44cb78adbc3052e13c920 is 175, key is test_row_0/A:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:24,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742033_1209 (size=31105) 2024-11-11T12:42:24,834 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329004833, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,837 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329004837, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:24,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:24,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329004839, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,042 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329005041, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329005043, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,045 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329005045, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=51 2024-11-11T12:42:25,169 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 51 completed 2024-11-11T12:42:25,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees 2024-11-11T12:42:25,173 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:25,173 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=53, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:25,174 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:25,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-11T12:42:25,208 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=253, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f974bec1e5d44cb78adbc3052e13c920 2024-11-11T12:42:25,238 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/3e76a847ce994269951a37bdb3582d4a is 50, key is test_row_0/B:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:25,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 
2024-11-11T12:42:25,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742034_1210 (size=12151) 2024-11-11T12:42:25,300 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/3e76a847ce994269951a37bdb3582d4a 2024-11-11T12:42:25,326 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,326 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-11T12:42:25,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:25,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:25,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:25,327 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] handler.RSProcedureHandler(58): pid=54 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:25,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=54 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:25,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=54 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:25,345 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329005345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,347 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3f6c7512cd7f4d309cbffef8c850b1f0 is 50, key is test_row_0/C:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:25,351 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329005350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,356 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329005350, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742035_1211 (size=12151) 2024-11-11T12:42:25,381 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=253 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3f6c7512cd7f4d309cbffef8c850b1f0 2024-11-11T12:42:25,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/f974bec1e5d44cb78adbc3052e13c920 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920 2024-11-11T12:42:25,393 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920, entries=150, sequenceid=253, filesize=30.4 K 2024-11-11T12:42:25,394 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/3e76a847ce994269951a37bdb3582d4a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a 2024-11-11T12:42:25,402 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a, entries=150, sequenceid=253, filesize=11.9 K 2024-11-11T12:42:25,404 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/3f6c7512cd7f4d309cbffef8c850b1f0 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0 2024-11-11T12:42:25,409 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0, entries=150, sequenceid=253, filesize=11.9 K 2024-11-11T12:42:25,412 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for fe7e7af7c234f1775e0b775751ee14f9 in 721ms, sequenceid=253, compaction requested=true 2024-11-11T12:42:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:25,412 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:25,412 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:25,412 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:25,413 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93793 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:25,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:25,413 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:25,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:25,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:25,413 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:25,413 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:25,414 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=91.6 K 2024-11-11T12:42:25,414 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:25,414 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920] 2024-11-11T12:42:25,414 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:25,414 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:25,414 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:25,414 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e87e14a8a1f946c580e243ab25436ed1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=36.1 K 2024-11-11T12:42:25,420 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 13705688c9a84ea7aff849e9aba3da8e, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:25,420 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e87e14a8a1f946c580e243ab25436ed1, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:25,421 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7bc55d68685b40d7b171960d89332e87, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328942356 2024-11-11T12:42:25,422 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e8e3beefa1354622b0cc08bf42e9692c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328942356 2024-11-11T12:42:25,422 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f974bec1e5d44cb78adbc3052e13c920, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:25,427 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e76a847ce994269951a37bdb3582d4a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:25,440 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:25,446 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#178 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:25,447 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1ec361ec2e3d48a98cda2f36ca7821a9 is 50, key is test_row_0/B:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:25,447 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111a91695dec2644aa995df70acd1addce9_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:25,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111a91695dec2644aa995df70acd1addce9_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:25,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a91695dec2644aa995df70acd1addce9_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:25,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-11T12:42:25,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742037_1213 (size=4469) 2024-11-11T12:42:25,486 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,491 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=54 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:25,492 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#177 average throughput is 0.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:25,492 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:25,492 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b is 175, key is test_row_0/A:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:25,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:25,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742036_1212 (size=12731) 2024-11-11T12:42:25,508 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1ec361ec2e3d48a98cda2f36ca7821a9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1ec361ec2e3d48a98cda2f36ca7821a9 2024-11-11T12:42:25,524 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into 1ec361ec2e3d48a98cda2f36ca7821a9(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:25,524 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:25,524 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=13, startTime=1731328945412; duration=0sec 2024-11-11T12:42:25,524 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:25,524 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:25,524 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:25,527 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:25,527 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:25,527 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:25,527 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/18c17a8f60d84a02b608056281c88acf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=36.1 K 2024-11-11T12:42:25,527 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111fb01ce038a7e4872a2823a9657a99009_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328944714/Put/seqid=0 2024-11-11T12:42:25,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742038_1214 (size=31685) 2024-11-11T12:42:25,528 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 18c17a8f60d84a02b608056281c88acf, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=213, earliestPutTs=1731328941210 2024-11-11T12:42:25,530 DEBUG 
[RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4c152f97c7714a0e934397202d9a24da, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=235, earliestPutTs=1731328942356 2024-11-11T12:42:25,531 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3f6c7512cd7f4d309cbffef8c850b1f0, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:25,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742039_1215 (size=12454) 2024-11-11T12:42:25,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:25,539 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b 2024-11-11T12:42:25,548 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111fb01ce038a7e4872a2823a9657a99009_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111fb01ce038a7e4872a2823a9657a99009_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:25,549 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/79c4f2063b224550bf83b1b8e805b72b, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:25,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/79c4f2063b224550bf83b1b8e805b72b is 175, key is test_row_0/A:col10/1731328944714/Put/seqid=0 2024-11-11T12:42:25,552 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#180 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:25,553 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/709db34be3634b46994b53e39fe4c178 is 50, key is test_row_0/C:col10/1731328944679/Put/seqid=0 2024-11-11T12:42:25,563 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into fa5ad6ee5ceb4a7499cd081d7d4cef8b(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:25,563 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:25,563 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=13, startTime=1731328945412; duration=0sec 2024-11-11T12:42:25,563 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:25,563 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:25,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742041_1217 (size=12731) 2024-11-11T12:42:25,615 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/709db34be3634b46994b53e39fe4c178 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/709db34be3634b46994b53e39fe4c178 2024-11-11T12:42:25,622 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into 709db34be3634b46994b53e39fe4c178(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:25,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:25,622 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=13, startTime=1731328945413; duration=0sec 2024-11-11T12:42:25,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:25,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:25,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742040_1216 (size=31255) 2024-11-11T12:42:25,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:25,782 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:25,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-11T12:42:25,813 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329005812, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,849 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329005848, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329005854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,868 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329005864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:25,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:25,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329005918, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,033 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=275, memsize=42.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/79c4f2063b224550bf83b1b8e805b72b 2024-11-11T12:42:26,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/fefe375360c544dc9856411b739b12c4 is 50, key is test_row_0/B:col10/1731328944714/Put/seqid=0 2024-11-11T12:42:26,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742042_1218 (size=12301) 2024-11-11T12:42:26,127 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329006126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-11T12:42:26,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329006431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,492 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/fefe375360c544dc9856411b739b12c4 2024-11-11T12:42:26,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7652f640134248d1b0e1ecdeadbb2136 is 50, key is test_row_0/C:col10/1731328944714/Put/seqid=0 2024-11-11T12:42:26,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742043_1219 (size=12301) 2024-11-11T12:42:26,859 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329006859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,860 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329006859, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329006870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,938 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:26,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329006938, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:26,956 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7652f640134248d1b0e1ecdeadbb2136 2024-11-11T12:42:26,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/79c4f2063b224550bf83b1b8e805b72b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b 2024-11-11T12:42:27,004 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b, entries=150, sequenceid=275, filesize=30.5 K 2024-11-11T12:42:27,008 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/fefe375360c544dc9856411b739b12c4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4 2024-11-11T12:42:27,040 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4, entries=150, sequenceid=275, filesize=12.0 K 2024-11-11T12:42:27,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7652f640134248d1b0e1ecdeadbb2136 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136 2024-11-11T12:42:27,062 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136, entries=150, sequenceid=275, filesize=12.0 K 2024-11-11T12:42:27,063 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for fe7e7af7c234f1775e0b775751ee14f9 in 1571ms, sequenceid=275, compaction requested=false 2024-11-11T12:42:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:27,064 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-11T12:42:27,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=54 2024-11-11T12:42:27,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=53 2024-11-11T12:42:27,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8940 sec 2024-11-11T12:42:27,082 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=53, table=TestAcidGuarantees in 1.9090 sec 2024-11-11T12:42:27,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=53 2024-11-11T12:42:27,309 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 53 completed 2024-11-11T12:42:27,317 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:27,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees 2024-11-11T12:42:27,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-11T12:42:27,329 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_PREPARE, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:27,340 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=55, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:27,340 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=56, ppid=55, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:27,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-11T12:42:27,507 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:27,508 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=56 2024-11-11T12:42:27,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:27,508 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:27,508 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:27,509 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:27,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111175d22b3a75324094a27078228fabc92f_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328945811/Put/seqid=0 2024-11-11T12:42:27,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742044_1220 
(size=12454) 2024-11-11T12:42:27,540 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:27,572 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111175d22b3a75324094a27078228fabc92f_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111175d22b3a75324094a27078228fabc92f_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:27,584 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2b945eb33af046ae81cda6f45696ffd1, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:27,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2b945eb33af046ae81cda6f45696ffd1 is 175, key is test_row_0/A:col10/1731328945811/Put/seqid=0 2024-11-11T12:42:27,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742045_1221 (size=31255) 2024-11-11T12:42:27,620 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=292, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2b945eb33af046ae81cda6f45696ffd1 2024-11-11T12:42:27,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-11T12:42:27,634 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/739907d282374f2192d062a815fd57f1 is 50, key is test_row_0/B:col10/1731328945811/Put/seqid=0 2024-11-11T12:42:27,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742046_1222 (size=12301) 2024-11-11T12:42:27,665 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/739907d282374f2192d062a815fd57f1 2024-11-11T12:42:27,683 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7dc7e61327d64f6891911bfc94009008 is 50, key is test_row_0/C:col10/1731328945811/Put/seqid=0 2024-11-11T12:42:27,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742047_1223 (size=12301) 2024-11-11T12:42:27,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-11T12:42:27,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:27,957 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:28,061 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,113 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7dc7e61327d64f6891911bfc94009008 2024-11-11T12:42:28,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/2b945eb33af046ae81cda6f45696ffd1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1 2024-11-11T12:42:28,133 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1, entries=150, sequenceid=292, filesize=30.5 K 2024-11-11T12:42:28,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/739907d282374f2192d062a815fd57f1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1 2024-11-11T12:42:28,140 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1, entries=150, sequenceid=292, filesize=12.0 K 2024-11-11T12:42:28,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/7dc7e61327d64f6891911bfc94009008 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008 2024-11-11T12:42:28,148 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008, entries=150, sequenceid=292, filesize=12.0 K 2024-11-11T12:42:28,150 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for fe7e7af7c234f1775e0b775751ee14f9 in 641ms, sequenceid=292, compaction requested=true 2024-11-11T12:42:28,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:28,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=56}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=56 2024-11-11T12:42:28,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=56 2024-11-11T12:42:28,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=56, resume processing ppid=55 2024-11-11T12:42:28,154 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, ppid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 812 msec 2024-11-11T12:42:28,156 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=55, table=TestAcidGuarantees in 837 msec 2024-11-11T12:42:28,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:28,166 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:28,166 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,177 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115af66c9694ea4730822ccd68a02bd47a_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:28,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742048_1224 (size=14994) 2024-11-11T12:42:28,214 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:28,225 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115af66c9694ea4730822ccd68a02bd47a_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115af66c9694ea4730822ccd68a02bd47a_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:28,227 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/c94e7a74e7a34f6f9fdd2f87841c8843, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:28,227 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/c94e7a74e7a34f6f9fdd2f87841c8843 is 175, key is test_row_0/A:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:28,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742049_1225 (size=39949) 2024-11-11T12:42:28,260 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/c94e7a74e7a34f6f9fdd2f87841c8843 2024-11-11T12:42:28,276 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f454b82b5f9e4010a2a7acd0ed273374 is 50, key is test_row_0/B:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:28,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742050_1226 (size=12301) 2024-11-11T12:42:28,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f454b82b5f9e4010a2a7acd0ed273374 2024-11-11T12:42:28,298 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/da3aa2b17d014733a0590f98885051d2 is 50, key is test_row_0/C:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:28,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742051_1227 (size=12301) 2024-11-11T12:42:28,339 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-11-11T12:42:28,432 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 55 completed 2024-11-11T12:42:28,436 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:28,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees 2024-11-11T12:42:28,449 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:28,450 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=57, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:28,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:28,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:28,542 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:28,602 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,604 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-11T12:42:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,604 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:28,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,605 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] handler.RSProcedureHandler(58): pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:28,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=58 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:28,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=58 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:28,719 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/da3aa2b17d014733a0590f98885051d2 2024-11-11T12:42:28,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/c94e7a74e7a34f6f9fdd2f87841c8843 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843 2024-11-11T12:42:28,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843, entries=200, sequenceid=315, filesize=39.0 K 2024-11-11T12:42:28,731 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f454b82b5f9e4010a2a7acd0ed273374 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374 2024-11-11T12:42:28,735 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374, entries=150, 
sequenceid=315, filesize=12.0 K 2024-11-11T12:42:28,737 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/da3aa2b17d014733a0590f98885051d2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2 2024-11-11T12:42:28,741 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2, entries=150, sequenceid=315, filesize=12.0 K 2024-11-11T12:42:28,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for fe7e7af7c234f1775e0b775751ee14f9 in 576ms, sequenceid=315, compaction requested=true 2024-11-11T12:42:28,742 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:28,742 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:28,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:28,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:28,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:28,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:28,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:28,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:42:28,744 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:28,744 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 134144 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:28,744 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:28,744 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in 
TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,744 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=131 K 2024-11-11T12:42:28,744 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,744 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843] 2024-11-11T12:42:28,745 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting fa5ad6ee5ceb4a7499cd081d7d4cef8b, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:28,745 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 79c4f2063b224550bf83b1b8e805b72b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731328944714 2024-11-11T12:42:28,745 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2b945eb33af046ae81cda6f45696ffd1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731328945797 2024-11-11T12:42:28,745 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:28,746 
DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:28,746 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,746 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1ec361ec2e3d48a98cda2f36ca7821a9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=48.5 K 2024-11-11T12:42:28,746 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c94e7a74e7a34f6f9fdd2f87841c8843, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:28,746 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1ec361ec2e3d48a98cda2f36ca7821a9, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:28,747 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fefe375360c544dc9856411b739b12c4, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731328944714 2024-11-11T12:42:28,747 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 739907d282374f2192d062a815fd57f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731328945797 2024-11-11T12:42:28,748 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f454b82b5f9e4010a2a7acd0ed273374, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:28,756 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:28,757 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,758 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=58 2024-11-11T12:42:28,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:28,758 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:28,758 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#190 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:28,758 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:28,758 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111f71d896c72854e9c84c48908e3fee9e3_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:28,759 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1e2db1c5e53a47aa8ac8e05d29777b52 is 50, key is test_row_0/B:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:28,759 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:28,761 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111f71d896c72854e9c84c48908e3fee9e3_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:28,761 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111f71d896c72854e9c84c48908e3fee9e3_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:28,770 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111189d03073fa94438c858bdc5b3f00b70c_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328948201/Put/seqid=0 2024-11-11T12:42:28,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742052_1228 (size=13017) 2024-11-11T12:42:28,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742053_1229 (size=4469) 2024-11-11T12:42:28,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742054_1230 (size=12454) 2024-11-11T12:42:28,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:28,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:28,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40200 deadline: 1731329008875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,880 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40186 deadline: 1731329008875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,880 DEBUG [Thread-699 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:28,880 DEBUG [Thread-703 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4152 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:28,881 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,881 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329008877, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,882 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329008878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329008982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329008983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:28,984 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:28,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329008983, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:29,186 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329009186, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329009187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,187 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329009187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,199 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/1e2db1c5e53a47aa8ac8e05d29777b52 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1e2db1c5e53a47aa8ac8e05d29777b52 2024-11-11T12:42:29,201 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#189 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:29,201 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b0d643b89d854e17b4e544fe7e5945aa is 175, key is test_row_0/A:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:29,207 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into 1e2db1c5e53a47aa8ac8e05d29777b52(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:29,207 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:29,207 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=12, startTime=1731328948743; duration=0sec 2024-11-11T12:42:29,207 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:29,207 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:29,207 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:29,209 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49634 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:29,209 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:29,209 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:29,209 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/709db34be3634b46994b53e39fe4c178, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=48.5 K 2024-11-11T12:42:29,210 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 709db34be3634b46994b53e39fe4c178, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=253, earliestPutTs=1731328943557 2024-11-11T12:42:29,211 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7652f640134248d1b0e1ecdeadbb2136, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1731328944714 2024-11-11T12:42:29,211 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7dc7e61327d64f6891911bfc94009008, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=292, earliestPutTs=1731328945797 2024-11-11T12:42:29,211 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting da3aa2b17d014733a0590f98885051d2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:29,220 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:29,226 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111189d03073fa94438c858bdc5b3f00b70c_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111189d03073fa94438c858bdc5b3f00b70c_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:29,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/01de1577e2bb4f1daba3e9b8e534d4e6, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:29,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/01de1577e2bb4f1daba3e9b8e534d4e6 is 175, key is test_row_0/A:col10/1731328948201/Put/seqid=0 2024-11-11T12:42:29,243 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#192 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:29,243 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/912f1c156903407abef8405e007cc532 is 50, key is test_row_0/C:col10/1731328948059/Put/seqid=0 2024-11-11T12:42:29,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742055_1231 (size=31971) 2024-11-11T12:42:29,262 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/b0d643b89d854e17b4e544fe7e5945aa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa 2024-11-11T12:42:29,271 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into b0d643b89d854e17b4e544fe7e5945aa(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:29,271 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:29,271 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=12, startTime=1731328948742; duration=0sec 2024-11-11T12:42:29,271 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:29,271 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:29,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742056_1232 (size=31255) 2024-11-11T12:42:29,297 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/01de1577e2bb4f1daba3e9b8e534d4e6 2024-11-11T12:42:29,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742057_1233 (size=13017) 2024-11-11T12:42:29,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/2d4522783b554145a91d13ddd0e85ca6 is 50, key is 
test_row_0/B:col10/1731328948201/Put/seqid=0 2024-11-11T12:42:29,350 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/912f1c156903407abef8405e007cc532 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/912f1c156903407abef8405e007cc532 2024-11-11T12:42:29,362 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into 912f1c156903407abef8405e007cc532(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:29,362 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:29,362 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=12, startTime=1731328948743; duration=0sec 2024-11-11T12:42:29,362 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:29,362 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:29,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742058_1234 (size=12301) 2024-11-11T12:42:29,379 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/2d4522783b554145a91d13ddd0e85ca6 2024-11-11T12:42:29,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/f4a92b570cc74bd8bf27bb3229f7d276 is 50, key is test_row_0/C:col10/1731328948201/Put/seqid=0 2024-11-11T12:42:29,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742059_1235 (size=12301) 2024-11-11T12:42:29,489 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329009488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329009490, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,491 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:29,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329009491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:29,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:29,815 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/f4a92b570cc74bd8bf27bb3229f7d276 2024-11-11T12:42:29,839 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/01de1577e2bb4f1daba3e9b8e534d4e6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6 2024-11-11T12:42:29,845 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6, entries=150, sequenceid=328, filesize=30.5 K 2024-11-11T12:42:29,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/2d4522783b554145a91d13ddd0e85ca6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6 2024-11-11T12:42:29,852 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6, entries=150, sequenceid=328, filesize=12.0 K 2024-11-11T12:42:29,856 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/f4a92b570cc74bd8bf27bb3229f7d276 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276 2024-11-11T12:42:29,866 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276, entries=150, sequenceid=328, filesize=12.0 K 2024-11-11T12:42:29,869 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for fe7e7af7c234f1775e0b775751ee14f9 in 1110ms, sequenceid=328, compaction requested=false 2024-11-11T12:42:29,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:29,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
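With the flush finished (dataSize ~67.09 KB committed at sequenceid=328), the memstore drops back under its blocking limit and the rejected mutations can succeed on retry. RegionTooBusyException is a retryable IOException that the stock HBase client normally backs off on internally; the Java sketch below only makes that backoff idea explicit. The table name and column family mirror the test, while the attempt count and sleep values are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionTooBusyBackoff {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            long backoffMs = 100;               // illustrative initial backoff
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);             // may be rejected while the memstore is over its blocking limit
                    return;                     // write accepted
                } catch (RegionTooBusyException e) {
                    // The server is asking writers to slow down until the flush
                    // seen in the surrounding log catches up; wait and try again.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;             // exponential backoff
                }
            }
            throw new RuntimeException("region stayed busy after 5 attempts");
        }
    }
}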
2024-11-11T12:42:29,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=58}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=58 2024-11-11T12:42:29,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=58 2024-11-11T12:42:29,873 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=58, resume processing ppid=57 2024-11-11T12:42:29,873 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4210 sec 2024-11-11T12:42:29,875 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=57, table=TestAcidGuarantees in 1.4370 sec 2024-11-11T12:42:29,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:29,995 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:29,995 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:30,003 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111992d996e999c4068bc22c35d2fadd5af_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742060_1236 (size=12454) 2024-11-11T12:42:30,023 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329010013, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329010024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,025 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329010024, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329010128, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329010129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,143 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329010142, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40220 deadline: 1731329010333, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,334 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40240 deadline: 1731329010334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,346 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:30,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:40208 deadline: 1731329010346, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,408 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:30,416 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111992d996e999c4068bc22c35d2fadd5af_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111992d996e999c4068bc22c35d2fadd5af_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:30,417 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/339c65a112834eab8df966004f1e7653, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:30,418 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/339c65a112834eab8df966004f1e7653 is 175, key is test_row_0/A:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742061_1237 (size=31255) 2024-11-11T12:42:30,426 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/339c65a112834eab8df966004f1e7653 2024-11-11T12:42:30,435 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f1d57e86fe624dd1a68b6e5a8b55e93b is 50, key is test_row_0/B:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742062_1238 
(size=12301) 2024-11-11T12:42:30,448 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f1d57e86fe624dd1a68b6e5a8b55e93b 2024-11-11T12:42:30,466 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/51f91545aec7449291d32c6ba786c1bc is 50, key is test_row_0/C:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742063_1239 (size=12301) 2024-11-11T12:42:30,513 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/51f91545aec7449291d32c6ba786c1bc 2024-11-11T12:42:30,522 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/339c65a112834eab8df966004f1e7653 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653 2024-11-11T12:42:30,532 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653, entries=150, sequenceid=355, filesize=30.5 K 2024-11-11T12:42:30,533 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/f1d57e86fe624dd1a68b6e5a8b55e93b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b 2024-11-11T12:42:30,539 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b, entries=150, sequenceid=355, filesize=12.0 K 2024-11-11T12:42:30,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/51f91545aec7449291d32c6ba786c1bc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc 2024-11-11T12:42:30,546 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc, entries=150, sequenceid=355, filesize=12.0 K 2024-11-11T12:42:30,549 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for fe7e7af7c234f1775e0b775751ee14f9 in 555ms, sequenceid=355, compaction requested=true 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:30,549 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store fe7e7af7c234f1775e0b775751ee14f9:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:30,549 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:30,549 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:30,552 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:30,552 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/B is initiating minor compaction (all files) 2024-11-11T12:42:30,552 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/B in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
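The "Exploring compaction algorithm has selected 3 files of size 37619 ... with 1 in ratio" entries describe the store-file selection step: candidate runs of files are enumerated and kept only when no single file dwarfs the rest of the selection. The sketch below is a simplified illustration of that size-ratio test, not the actual ExploringCompactionPolicy source; 1.2 is the default value of hbase.hstore.compaction.ratio, and the sample sizes match the three B-store files chosen above (13017 + 12301 + 12301 = 37619 bytes).

public class CompactionRatioSketch {
    // Simplified version of the "in ratio" test applied to candidate compaction
    // selections: every file must be no larger than `ratio` times the combined
    // size of the other files, otherwise the selection is rejected. This mirrors
    // the idea only, not HBase's exact implementation.
    static boolean filesInRatio(long[] fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;   // one file dominates; skip this selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        long[] candidate = {13017, 12301, 12301};            // B-store files selected above
        System.out.println(filesInRatio(candidate, 1.2));    // true -> eligible for minor compaction
    }
}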
2024-11-11T12:42:30,553 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1e2db1c5e53a47aa8ac8e05d29777b52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=36.7 K 2024-11-11T12:42:30,553 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94481 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:30,553 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/A is initiating minor compaction (all files) 2024-11-11T12:42:30,553 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/A in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:30,553 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=92.3 K 2024-11-11T12:42:30,553 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:30,553 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653] 2024-11-11T12:42:30,556 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e2db1c5e53a47aa8ac8e05d29777b52, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:30,556 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b0d643b89d854e17b4e544fe7e5945aa, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:30,556 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d4522783b554145a91d13ddd0e85ca6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731328948182 2024-11-11T12:42:30,556 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 01de1577e2bb4f1daba3e9b8e534d4e6, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731328948182 2024-11-11T12:42:30,557 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f1d57e86fe624dd1a68b6e5a8b55e93b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731328948870 2024-11-11T12:42:30,557 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 339c65a112834eab8df966004f1e7653, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731328948870 2024-11-11T12:42:30,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=57 2024-11-11T12:42:30,561 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 57 completed 2024-11-11T12:42:30,572 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:30,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees 2024-11-11T12:42:30,576 DEBUG [Thread-712 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3448d233 to 127.0.0.1:54294 2024-11-11T12:42:30,576 DEBUG [Thread-712 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:30,578 DEBUG [Thread-714 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a11164b to 127.0.0.1:54294 2024-11-11T12:42:30,578 DEBUG [Thread-714 {}] ipc.AbstractRpcClient(514): Stopping rpc client 
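The flush procedures in this stretch (pid=57 just reported completed, pid=59 just stored) are client-driven: jenkins calls flush on TestAcidGuarantees, the master stores a FlushTableProcedure, and the client polls "Checking to see if procedure is done" until it finishes. A minimal Java sketch of that client call follows; it uses the standard Admin API, with connection details assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Asks the master to flush every region of the table; the master runs a
            // FlushTableProcedure and the client waits for the procedure to finish,
            // matching the "Checking to see if procedure is done pid=..." entries.
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}

The HBaseAdmin$TableFuture line above ("Operation: FLUSH ... procId: 57 completed") is the client-side future logging that this wait has completed.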
2024-11-11T12:42:30,580 DEBUG [Thread-708 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x247c0c93 to 127.0.0.1:54294 2024-11-11T12:42:30,580 DEBUG [Thread-708 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,581 DEBUG [Thread-710 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x517ff977 to 127.0.0.1:54294 2024-11-11T12:42:30,581 DEBUG [Thread-710 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,582 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:30,584 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#B#compaction#199 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:30,585 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e001acf978fb42f6ace95e76621a7220 is 50, key is test_row_0/B:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,588 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:30,590 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111ea96d39ac25b4b2f9d0d18230411e1c4_fe7e7af7c234f1775e0b775751ee14f9 store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:30,591 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=59, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:30,591 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:30,591 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111ea96d39ac25b4b2f9d0d18230411e1c4_fe7e7af7c234f1775e0b775751ee14f9, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:30,592 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ea96d39ac25b4b2f9d0d18230411e1c4_fe7e7af7c234f1775e0b775751ee14f9 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:30,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742064_1240 (size=13119) 2024-11-11T12:42:30,612 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742065_1241 (size=4469) 2024-11-11T12:42:30,617 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#A#compaction#198 average throughput is 0.70 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:30,618 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/372cef4ec4fd419db21586744a3365da is 175, key is test_row_0/A:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:30,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742066_1242 (size=32073) 2024-11-11T12:42:30,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:30,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:30,638 DEBUG [Thread-701 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0b44b1e5 to 127.0.0.1:54294 2024-11-11T12:42:30,638 DEBUG [Thread-701 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:30,638 DEBUG [Thread-705 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a4c53ed to 127.0.0.1:54294 2024-11-11T12:42:30,638 DEBUG [Thread-705 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:30,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:30,644 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111094b1ec0d0ae47cf83877e94ed8a003c_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328950011/Put/seqid=0 2024-11-11T12:42:30,649 DEBUG [Thread-697 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cae6c5c to 127.0.0.1:54294 2024-11-11T12:42:30,649 DEBUG [Thread-697 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:30,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742067_1243 (size=12454) 2024-11-11T12:42:30,678 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:30,742 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:30,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:30,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:30,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:30,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:30,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:30,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:30,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:30,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:30,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:30,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:30,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:30,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:30,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:30,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:30,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,014 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/e001acf978fb42f6ace95e76621a7220 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e001acf978fb42f6ace95e76621a7220 2024-11-11T12:42:31,020 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/B of fe7e7af7c234f1775e0b775751ee14f9 into e001acf978fb42f6ace95e76621a7220(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:31,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:31,020 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/B, priority=13, startTime=1731328950549; duration=0sec 2024-11-11T12:42:31,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:31,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:B 2024-11-11T12:42:31,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:31,021 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37619 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:31,022 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): fe7e7af7c234f1775e0b775751ee14f9/C is initiating minor compaction (all files) 2024-11-11T12:42:31,022 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of fe7e7af7c234f1775e0b775751ee14f9/C in TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,022 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/912f1c156903407abef8405e007cc532, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp, totalSize=36.7 K 2024-11-11T12:42:31,023 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 912f1c156903407abef8405e007cc532, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731328948022 2024-11-11T12:42:31,023 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f4a92b570cc74bd8bf27bb3229f7d276, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731328948182 2024-11-11T12:42:31,024 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 51f91545aec7449291d32c6ba786c1bc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731328948870 2024-11-11T12:42:31,032 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/372cef4ec4fd419db21586744a3365da as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/372cef4ec4fd419db21586744a3365da 2024-11-11T12:42:31,036 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): fe7e7af7c234f1775e0b775751ee14f9#C#compaction#201 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:31,037 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/e80f3aea161640858483d71db6130af0 is 50, key is test_row_0/C:col10/1731328949993/Put/seqid=0 2024-11-11T12:42:31,040 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/A of fe7e7af7c234f1775e0b775751ee14f9 into 372cef4ec4fd419db21586744a3365da(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:31,040 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:31,040 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/A, priority=13, startTime=1731328950549; duration=0sec 2024-11-11T12:42:31,040 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:31,040 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:A 2024-11-11T12:42:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742068_1244 (size=13119) 2024-11-11T12:42:31,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,050 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:31,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:31,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:31,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:31,055 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111094b1ec0d0ae47cf83877e94ed8a003c_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111094b1ec0d0ae47cf83877e94ed8a003c_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:31,056 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/e80f3aea161640858483d71db6130af0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/e80f3aea161640858483d71db6130af0 2024-11-11T12:42:31,056 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5af8e051e53f4ec08224ffed0077d5ce, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:31,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5af8e051e53f4ec08224ffed0077d5ce is 175, key is test_row_0/A:col10/1731328950011/Put/seqid=0 2024-11-11T12:42:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742069_1245 (size=31255) 2024-11-11T12:42:31,063 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=369, memsize=24.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5af8e051e53f4ec08224ffed0077d5ce 2024-11-11T12:42:31,067 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in fe7e7af7c234f1775e0b775751ee14f9/C of fe7e7af7c234f1775e0b775751ee14f9 into e80f3aea161640858483d71db6130af0(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:31,067 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:31,067 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9., storeName=fe7e7af7c234f1775e0b775751ee14f9/C, priority=13, startTime=1731328950549; duration=0sec 2024-11-11T12:42:31,067 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:31,067 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: fe7e7af7c234f1775e0b775751ee14f9:C 2024-11-11T12:42:31,072 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/805b5f0253824cad9ccf819cb9963b24 is 50, key is test_row_0/B:col10/1731328950011/Put/seqid=0 2024-11-11T12:42:31,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742070_1246 (size=12301) 2024-11-11T12:42:31,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:31,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,204 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,356 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,357 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:31,357 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,357 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,358 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,490 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/805b5f0253824cad9ccf819cb9963b24 2024-11-11T12:42:31,499 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/04bc5120c6794389b100cc8f4d933df9 is 50, key is test_row_0/C:col10/1731328950011/Put/seqid=0 2024-11-11T12:42:31,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742071_1247 (size=12301) 2024-11-11T12:42:31,510 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,511 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
as already flushing 2024-11-11T12:42:31,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,511 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,664 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,664 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:31,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,665 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:31,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,816 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. as already flushing 2024-11-11T12:42:31,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:31,817 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] handler.RSProcedureHandler(58): pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=60 java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=60 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:31,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=369 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/04bc5120c6794389b100cc8f4d933df9 2024-11-11T12:42:31,910 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/5af8e051e53f4ec08224ffed0077d5ce as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5af8e051e53f4ec08224ffed0077d5ce 2024-11-11T12:42:31,915 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5af8e051e53f4ec08224ffed0077d5ce, entries=150, sequenceid=369, filesize=30.5 K 2024-11-11T12:42:31,916 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/805b5f0253824cad9ccf819cb9963b24 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/805b5f0253824cad9ccf819cb9963b24 2024-11-11T12:42:31,924 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/805b5f0253824cad9ccf819cb9963b24, entries=150, 
sequenceid=369, filesize=12.0 K 2024-11-11T12:42:31,925 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/04bc5120c6794389b100cc8f4d933df9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/04bc5120c6794389b100cc8f4d933df9 2024-11-11T12:42:31,932 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/04bc5120c6794389b100cc8f4d933df9, entries=150, sequenceid=369, filesize=12.0 K 2024-11-11T12:42:31,933 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=6.71 KB/6870 for fe7e7af7c234f1775e0b775751ee14f9 in 1296ms, sequenceid=369, compaction requested=false 2024-11-11T12:42:31,933 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:31,969 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:31,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=60 2024-11-11T12:42:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:31,970 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-11T12:42:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:31,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:31,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:31,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:31,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:31,971 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:31,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111441365e8b1ef4250b696ac7545ed85a4_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_0/A:col10/1731328950648/Put/seqid=0 2024-11-11T12:42:31,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742072_1248 (size=7374) 2024-11-11T12:42:31,990 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:31,998 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111441365e8b1ef4250b696ac7545ed85a4_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111441365e8b1ef4250b696ac7545ed85a4_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:31,999 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/3e148c40c8fc4039a3c8a40ba2499b22, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:32,000 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/3e148c40c8fc4039a3c8a40ba2499b22 is 175, key is test_row_0/A:col10/1731328950648/Put/seqid=0 2024-11-11T12:42:32,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742073_1249 (size=13865) 2024-11-11T12:42:32,022 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=376, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/3e148c40c8fc4039a3c8a40ba2499b22 2024-11-11T12:42:32,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/07804f04c1c64b198aee7a90736e352d is 50, key is test_row_0/B:col10/1731328950648/Put/seqid=0 2024-11-11T12:42:32,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742074_1250 (size=7415) 2024-11-11T12:42:32,045 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/07804f04c1c64b198aee7a90736e352d 2024-11-11T12:42:32,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/21a2d5103301406ea84170d91a3283f3 is 50, key is test_row_0/C:col10/1731328950648/Put/seqid=0 2024-11-11T12:42:32,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742075_1251 (size=7415) 2024-11-11T12:42:32,070 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=376 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/21a2d5103301406ea84170d91a3283f3 2024-11-11T12:42:32,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/3e148c40c8fc4039a3c8a40ba2499b22 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/3e148c40c8fc4039a3c8a40ba2499b22 2024-11-11T12:42:32,079 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/3e148c40c8fc4039a3c8a40ba2499b22, entries=50, sequenceid=376, filesize=13.5 K 2024-11-11T12:42:32,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/07804f04c1c64b198aee7a90736e352d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/07804f04c1c64b198aee7a90736e352d 2024-11-11T12:42:32,083 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/07804f04c1c64b198aee7a90736e352d, entries=50, sequenceid=376, filesize=7.2 K 2024-11-11T12:42:32,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/21a2d5103301406ea84170d91a3283f3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/21a2d5103301406ea84170d91a3283f3 2024-11-11T12:42:32,087 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/21a2d5103301406ea84170d91a3283f3, entries=50, sequenceid=376, filesize=7.2 K 2024-11-11T12:42:32,088 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for fe7e7af7c234f1775e0b775751ee14f9 in 117ms, sequenceid=376, compaction requested=true 2024-11-11T12:42:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.HRegion(2538): Flush status journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:32,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=60}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=60 2024-11-11T12:42:32,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=60 2024-11-11T12:42:32,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-11-11T12:42:32,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4980 sec 2024-11-11T12:42:32,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=59, table=TestAcidGuarantees in 1.5180 sec 2024-11-11T12:42:32,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=59 2024-11-11T12:42:32,682 INFO [Thread-707 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 59 completed 2024-11-11T12:42:32,888 DEBUG [Thread-703 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x42e904d8 to 127.0.0.1:54294 2024-11-11T12:42:32,888 DEBUG [Thread-703 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:32,915 DEBUG [Thread-699 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c820ef9 to 127.0.0.1:54294 2024-11-11T12:42:32,915 DEBUG [Thread-699 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers: 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 70 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 65 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 18 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4894 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 4712 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 2007 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 6021 rows 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1985 2024-11-11T12:42:32,915 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5955 rows 2024-11-11T12:42:32,915 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T12:42:32,915 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7a9b9802 to 127.0.0.1:54294 2024-11-11T12:42:32,915 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:32,918 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-11T12:42:32,918 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-11T12:42:32,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=61, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:32,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:32,927 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328952927"}]},"ts":"1731328952927"} 2024-11-11T12:42:32,928 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-11T12:42:32,930 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-11T12:42:32,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=62, ppid=61, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:42:32,934 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, UNASSIGN}] 2024-11-11T12:42:32,934 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=63, ppid=62, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure 
table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, UNASSIGN 2024-11-11T12:42:32,936 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:32,937 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:42:32,937 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE; CloseRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:33,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:33,088 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:33,089 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(124): Close fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1681): Closing fe7e7af7c234f1775e0b775751ee14f9, disabling compactions & flushes 2024-11-11T12:42:33,089 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. after waiting 0 ms 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
2024-11-11T12:42:33,089 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(2837): Flushing fe7e7af7c234f1775e0b775751ee14f9 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-11T12:42:33,089 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=A 2024-11-11T12:42:33,090 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:33,090 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=B 2024-11-11T12:42:33,090 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:33,090 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactingMemStore(205): FLUSHING TO DISK fe7e7af7c234f1775e0b775751ee14f9, store=C 2024-11-11T12:42:33,090 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:33,095 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411113b6a8c6e43fc461986a910f9b005ee60_fe7e7af7c234f1775e0b775751ee14f9 is 50, key is test_row_2/A:col10/1731328952913/Put/seqid=0 2024-11-11T12:42:33,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742076_1252 (size=7374) 2024-11-11T12:42:33,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:33,500 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:33,505 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411113b6a8c6e43fc461986a910f9b005ee60_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113b6a8c6e43fc461986a910f9b005ee60_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:33,506 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/d575a4801de741689be30a8189991bba, store: [table=TestAcidGuarantees family=A region=fe7e7af7c234f1775e0b775751ee14f9] 2024-11-11T12:42:33,507 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/d575a4801de741689be30a8189991bba is 175, key is test_row_2/A:col10/1731328952913/Put/seqid=0 2024-11-11T12:42:33,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742077_1253 (size=13865) 2024-11-11T12:42:33,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:33,912 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=381, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/d575a4801de741689be30a8189991bba 2024-11-11T12:42:33,924 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/33782e22c2f041ec9fb77884e1507116 is 50, key is test_row_2/B:col10/1731328952913/Put/seqid=0 2024-11-11T12:42:33,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742078_1254 (size=7415) 2024-11-11T12:42:34,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:34,337 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=381 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/33782e22c2f041ec9fb77884e1507116 2024-11-11T12:42:34,345 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/c77f525397d94d26a8e358b109860019 is 50, key is test_row_2/C:col10/1731328952913/Put/seqid=0 2024-11-11T12:42:34,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742079_1255 (size=7415) 2024-11-11T12:42:34,758 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=381 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/c77f525397d94d26a8e358b109860019 2024-11-11T12:42:34,764 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/A/d575a4801de741689be30a8189991bba as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/d575a4801de741689be30a8189991bba 2024-11-11T12:42:34,769 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/d575a4801de741689be30a8189991bba, entries=50, sequenceid=381, filesize=13.5 K 2024-11-11T12:42:34,770 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/B/33782e22c2f041ec9fb77884e1507116 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/33782e22c2f041ec9fb77884e1507116 2024-11-11T12:42:34,774 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/33782e22c2f041ec9fb77884e1507116, entries=50, sequenceid=381, filesize=7.2 K 2024-11-11T12:42:34,775 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/.tmp/C/c77f525397d94d26a8e358b109860019 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/c77f525397d94d26a8e358b109860019 2024-11-11T12:42:34,780 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/c77f525397d94d26a8e358b109860019, entries=50, sequenceid=381, filesize=7.2 K 2024-11-11T12:42:34,781 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=0 B/0 for fe7e7af7c234f1775e0b775751ee14f9 in 1692ms, sequenceid=381, compaction requested=true 2024-11-11T12:42:34,781 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653] to archive 2024-11-11T12:42:34,783 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T12:42:34,786 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b5a9b9c4269442ad804c09cccc1ff2dd 2024-11-11T12:42:34,788 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/89aef9a17c174728ae4fff7ea3a45654 2024-11-11T12:42:34,789 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/60808cfbdfe944578f4d7e45adfc7cce 2024-11-11T12:42:34,791 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2c80448f4bad4e6a855f8624024ae2ea 2024-11-11T12:42:34,801 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/0b144c86b2b04ffab0e7ea384f6d1bdb 2024-11-11T12:42:34,803 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/9441e002d3ec4c8086ea3f382595a385 2024-11-11T12:42:34,804 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/a09f58fcf62c4059b103cadc38cba57a 2024-11-11T12:42:34,808 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/85213c7265a746299e1d7a86cc85bd65 2024-11-11T12:42:34,810 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/55449cbfecfb45cfb79357ccc1115a83 2024-11-11T12:42:34,811 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2d08e551310640d580d64521904a852e 2024-11-11T12:42:34,813 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f6082ea43ab1493594b63d6153dd6276 2024-11-11T12:42:34,815 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5c09b5133e534bb4be858353e11ba0ff 2024-11-11T12:42:34,817 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/ac11a70564de4e598c7b5bbbf0b89c1d 2024-11-11T12:42:34,822 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/13705688c9a84ea7aff849e9aba3da8e 2024-11-11T12:42:34,824 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/bd90ae292a9f41ea9729abd3c3d3769e 2024-11-11T12:42:34,825 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/7bc55d68685b40d7b171960d89332e87 2024-11-11T12:42:34,827 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/fa5ad6ee5ceb4a7499cd081d7d4cef8b 2024-11-11T12:42:34,829 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/f974bec1e5d44cb78adbc3052e13c920 2024-11-11T12:42:34,830 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/79c4f2063b224550bf83b1b8e805b72b 2024-11-11T12:42:34,833 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/2b945eb33af046ae81cda6f45696ffd1 2024-11-11T12:42:34,835 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/c94e7a74e7a34f6f9fdd2f87841c8843 2024-11-11T12:42:34,837 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/b0d643b89d854e17b4e544fe7e5945aa 2024-11-11T12:42:34,838 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/01de1577e2bb4f1daba3e9b8e534d4e6 2024-11-11T12:42:34,840 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/339c65a112834eab8df966004f1e7653 2024-11-11T12:42:34,846 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/bcdff7e0fcf647f897f1d47b9505269d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/d494ef56cfdb4df2bdb02ef5c74ad3e9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3ae9b59b11c640458fbac5bef7121d57, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e87e14a8a1f946c580e243ab25436ed1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1ec361ec2e3d48a98cda2f36ca7821a9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1e2db1c5e53a47aa8ac8e05d29777b52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b] to archive 2024-11-11T12:42:34,847 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T12:42:34,850 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1b2c067520cc4a389c792afcf6b4a301 2024-11-11T12:42:34,852 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/c4e04d4a48754b08bc40d4acc3a433c6 2024-11-11T12:42:34,853 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/bcdff7e0fcf647f897f1d47b9505269d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/bcdff7e0fcf647f897f1d47b9505269d 2024-11-11T12:42:34,855 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a9bea331eb174aca81d527817ca1a850 2024-11-11T12:42:34,857 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/166b9b6995b54d138493bec0a6fca487 2024-11-11T12:42:34,859 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/d494ef56cfdb4df2bdb02ef5c74ad3e9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/d494ef56cfdb4df2bdb02ef5c74ad3e9 2024-11-11T12:42:34,860 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/765c8385ea7b4c0b854998f3e0b4f349 2024-11-11T12:42:34,862 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a2318b042e7f4f39a1e067bc8331d894 2024-11-11T12:42:34,863 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3ae9b59b11c640458fbac5bef7121d57 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3ae9b59b11c640458fbac5bef7121d57 2024-11-11T12:42:34,865 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/283e2f8e02214eaa899b1711f6b593f8 2024-11-11T12:42:34,866 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/a7b5b9fcc1ae4d17bd9ed2e2a6d19874 2024-11-11T12:42:34,868 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/ee63ce79a0b94671a7bb11f0e6e27b51 2024-11-11T12:42:34,869 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/4d96e830bec447378381f91837dd2b11 2024-11-11T12:42:34,870 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e87e14a8a1f946c580e243ab25436ed1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e87e14a8a1f946c580e243ab25436ed1 2024-11-11T12:42:34,871 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/7be270f6aa074fce9a2a4ad6607a1a2d 2024-11-11T12:42:34,873 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e8e3beefa1354622b0cc08bf42e9692c 2024-11-11T12:42:34,874 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1ec361ec2e3d48a98cda2f36ca7821a9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1ec361ec2e3d48a98cda2f36ca7821a9 2024-11-11T12:42:34,875 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/3e76a847ce994269951a37bdb3582d4a 2024-11-11T12:42:34,877 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/fefe375360c544dc9856411b739b12c4 2024-11-11T12:42:34,878 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/739907d282374f2192d062a815fd57f1 2024-11-11T12:42:34,879 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1e2db1c5e53a47aa8ac8e05d29777b52 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/1e2db1c5e53a47aa8ac8e05d29777b52 2024-11-11T12:42:34,881 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f454b82b5f9e4010a2a7acd0ed273374 2024-11-11T12:42:34,882 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/2d4522783b554145a91d13ddd0e85ca6 2024-11-11T12:42:34,883 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/f1d57e86fe624dd1a68b6e5a8b55e93b 2024-11-11T12:42:34,885 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/77dfce55f7394e35b8be9f9ec3e6c86b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/5fd8e06e46634be0b0bd1656640cdb9e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/a063a0262e474bc78688ce0ee4f2a47b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/18c17a8f60d84a02b608056281c88acf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/709db34be3634b46994b53e39fe4c178, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/912f1c156903407abef8405e007cc532, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc] to archive 2024-11-11T12:42:34,886 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
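The entries above show HStore handing the compacted store files of each column family to the HFileArchiver, which moves every file from the region's data/ directory to the mirrored path under archive/. Below is a minimal sketch of that move-into-archive idea using only the public Hadoop FileSystem API; the class name ArchiveSketch, the /tmp paths and the stand-in file name are hypothetical, and this is not HBase's HFileArchiver, which additionally deals with name collisions, retries and copy-then-delete fallbacks.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {

  // Move every store file into archiveStoreDir, keeping the file name, the way the
  // data/... -> archive/... moves in the log keep the family/file layout intact.
  static void archiveStoreFiles(FileSystem fs, Path archiveStoreDir, List<Path> storeFiles)
      throws IOException {
    if (!fs.exists(archiveStoreDir)) {
      fs.mkdirs(archiveStoreDir);
    }
    for (Path src : storeFiles) {
      Path dst = new Path(archiveStoreDir, src.getName());
      if (!fs.rename(src, dst)) {
        throw new IOException("could not archive " + src + " to " + dst);
      }
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();   // local filesystem unless fs.defaultFS points at HDFS
    FileSystem fs = FileSystem.get(conf);
    // Hypothetical directories mirroring the data/ and archive/ store layout seen above.
    Path dataStoreDir = new Path("/tmp/archive-sketch/data/default/TestAcidGuarantees/region/C");
    Path archiveStoreDir = new Path("/tmp/archive-sketch/archive/data/default/TestAcidGuarantees/region/C");
    Path storeFile = new Path(dataStoreDir, "storefile-0");
    fs.create(storeFile).close();               // stand-in for a compacted HFile
    archiveStoreFiles(fs, archiveStoreDir, List.of(storeFile));
    System.out.println("archived to " + new Path(archiveStoreDir, storeFile.getName()));
  }
}
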
2024-11-11T12:42:34,888 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/1dd50a8f5b89415298edce14eb09c231 2024-11-11T12:42:34,889 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/fda44a3cde0744029d7df9ae245229d0 2024-11-11T12:42:34,891 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/77dfce55f7394e35b8be9f9ec3e6c86b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/77dfce55f7394e35b8be9f9ec3e6c86b 2024-11-11T12:42:34,892 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7950aff868534949b29b2225b62ed5d9 2024-11-11T12:42:34,894 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/92d4c630cafc4f91aadd852963e842e5 2024-11-11T12:42:34,895 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/5fd8e06e46634be0b0bd1656640cdb9e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/5fd8e06e46634be0b0bd1656640cdb9e 2024-11-11T12:42:34,896 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3303e534e1c445468ccc336c93e7e673 2024-11-11T12:42:34,899 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4eb272fbc7204185959c5dc2dc912e3b 2024-11-11T12:42:34,900 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/a063a0262e474bc78688ce0ee4f2a47b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/a063a0262e474bc78688ce0ee4f2a47b 2024-11-11T12:42:34,902 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/6bcb7f9e74f24ca3819a3ce212032f4f 2024-11-11T12:42:34,903 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ab2767badf54475398556da5efba2e67 2024-11-11T12:42:34,904 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/0adffd5fff784cb487b5d658071d36fd 2024-11-11T12:42:34,906 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/ac2a38fbd7d447cda4b892cd813b4dcb 2024-11-11T12:42:34,907 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/18c17a8f60d84a02b608056281c88acf to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/18c17a8f60d84a02b608056281c88acf 2024-11-11T12:42:34,909 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/362afab335fa4846804cd20430e1271b 2024-11-11T12:42:34,910 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/4c152f97c7714a0e934397202d9a24da 2024-11-11T12:42:34,912 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/709db34be3634b46994b53e39fe4c178 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/709db34be3634b46994b53e39fe4c178 2024-11-11T12:42:34,913 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/3f6c7512cd7f4d309cbffef8c850b1f0 2024-11-11T12:42:34,914 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7652f640134248d1b0e1ecdeadbb2136 2024-11-11T12:42:34,916 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/7dc7e61327d64f6891911bfc94009008 2024-11-11T12:42:34,917 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/912f1c156903407abef8405e007cc532 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/912f1c156903407abef8405e007cc532 2024-11-11T12:42:34,919 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/da3aa2b17d014733a0590f98885051d2 2024-11-11T12:42:34,920 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/f4a92b570cc74bd8bf27bb3229f7d276 2024-11-11T12:42:34,922 DEBUG [StoreCloser-TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/51f91545aec7449291d32c6ba786c1bc 2024-11-11T12:42:34,932 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits/384.seqid, newMaxSeqId=384, maxSeqId=4 2024-11-11T12:42:34,933 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9. 
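At this point all compacted store files of the region sit under archive/data/default/TestAcidGuarantees/<region>/<family>, a 384.seqid marker has been written to recovered.edits, and the region is closed. The following sketch, again against the plain FileSystem API, shows one way to list what ended up in such an archive family directory after the fact; the path is hypothetical and only mirrors the layout from the log.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchiveSketch {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    // Hypothetical archive family directory following archive/data/<ns>/<table>/<region>/<family>.
    Path archiveFamilyDir = new Path("/tmp/archive-sketch/archive/data/default/TestAcidGuarantees/region/C");
    if (!fs.exists(archiveFamilyDir)) {
      System.out.println("nothing archived under " + archiveFamilyDir);
      return;
    }
    for (FileStatus status : fs.listStatus(archiveFamilyDir)) {
      // Each entry corresponds to one archived store file like those listed in the log.
      System.out.println(status.getPath().getName() + " (" + status.getLen() + " bytes)");
    }
  }
}
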
2024-11-11T12:42:34,933 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] regionserver.HRegion(1635): Region close journal for fe7e7af7c234f1775e0b775751ee14f9: 2024-11-11T12:42:34,934 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=64}] handler.UnassignRegionHandler(170): Closed fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:34,935 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=63 updating hbase:meta row=fe7e7af7c234f1775e0b775751ee14f9, regionState=CLOSED 2024-11-11T12:42:34,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-11-11T12:42:34,937 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; CloseRegionProcedure fe7e7af7c234f1775e0b775751ee14f9, server=32e78532c8b1,44673,1731328897232 in 1.9980 sec 2024-11-11T12:42:34,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=63, resume processing ppid=62 2024-11-11T12:42:34,938 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, ppid=62, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=fe7e7af7c234f1775e0b775751ee14f9, UNASSIGN in 2.0030 sec 2024-11-11T12:42:34,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=62, resume processing ppid=61 2024-11-11T12:42:34,940 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, ppid=61, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 2.0080 sec 2024-11-11T12:42:34,941 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328954941"}]},"ts":"1731328954941"} 2024-11-11T12:42:34,942 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:42:34,947 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:42:34,949 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 2.0290 sec 2024-11-11T12:42:34,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
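The server-side DISABLE flow above (CloseRegionProcedure pid=64, TransitRegionStateProcedure UNASSIGN pid=63, CloseTableRegionsProcedure pid=62, DisableTableProcedure pid=61, with hbase:meta updated to state=DISABLED) is what a client triggers with a single Admin call. A hedged sketch of that client side, assuming an HBase cluster reachable through the default client configuration; the table name is the one used by this test.

import java.io.IOException;

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws IOException {
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      if (!admin.isTableDisabled(table)) {
        // Blocks until the master's DisableTableProcedure (pid=61 in the log) completes.
        admin.disableTable(table);
      }
      System.out.println(table + " disabled: " + admin.isTableDisabled(table));
    }
  }
}
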
2024-11-11T12:42:35,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=61 2024-11-11T12:42:35,031 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 61 completed 2024-11-11T12:42:35,032 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:42:35,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,034 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=65, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-11T12:42:35,034 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=65, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,037 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,039 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits] 2024-11-11T12:42:35,043 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/372cef4ec4fd419db21586744a3365da to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/372cef4ec4fd419db21586744a3365da 2024-11-11T12:42:35,044 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/3e148c40c8fc4039a3c8a40ba2499b22 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/3e148c40c8fc4039a3c8a40ba2499b22 2024-11-11T12:42:35,045 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5af8e051e53f4ec08224ffed0077d5ce to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/5af8e051e53f4ec08224ffed0077d5ce 2024-11-11T12:42:35,046 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/d575a4801de741689be30a8189991bba to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/A/d575a4801de741689be30a8189991bba 2024-11-11T12:42:35,049 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/07804f04c1c64b198aee7a90736e352d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/07804f04c1c64b198aee7a90736e352d 2024-11-11T12:42:35,050 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/33782e22c2f041ec9fb77884e1507116 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/33782e22c2f041ec9fb77884e1507116 2024-11-11T12:42:35,051 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/805b5f0253824cad9ccf819cb9963b24 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/805b5f0253824cad9ccf819cb9963b24 2024-11-11T12:42:35,053 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e001acf978fb42f6ace95e76621a7220 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/B/e001acf978fb42f6ace95e76621a7220 2024-11-11T12:42:35,055 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/04bc5120c6794389b100cc8f4d933df9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/04bc5120c6794389b100cc8f4d933df9 2024-11-11T12:42:35,057 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/21a2d5103301406ea84170d91a3283f3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/21a2d5103301406ea84170d91a3283f3 2024-11-11T12:42:35,058 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/c77f525397d94d26a8e358b109860019 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/c77f525397d94d26a8e358b109860019 2024-11-11T12:42:35,059 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/e80f3aea161640858483d71db6130af0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/C/e80f3aea161640858483d71db6130af0 2024-11-11T12:42:35,063 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits/384.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9/recovered.edits/384.seqid 2024-11-11T12:42:35,063 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,063 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:42:35,064 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:42:35,068 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-11T12:42:35,072 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111047424db6050469998085695f1c64cd3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111047424db6050469998085695f1c64cd3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,073 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111094b1ec0d0ae47cf83877e94ed8a003c_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111094b1ec0d0ae47cf83877e94ed8a003c_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,075 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110ccabba8b64a460c85ec32c97d72aae0_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110ccabba8b64a460c85ec32c97d72aae0_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,076 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411111cd423ef189247cfa33ae262d3357b06_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411111cd423ef189247cfa33ae262d3357b06_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,078 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111210c6af68b6e4001964469bdcda8e8b5_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111210c6af68b6e4001964469bdcda8e8b5_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,079 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113b6a8c6e43fc461986a910f9b005ee60_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113b6a8c6e43fc461986a910f9b005ee60_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,081 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111441365e8b1ef4250b696ac7545ed85a4_fe7e7af7c234f1775e0b775751ee14f9 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111441365e8b1ef4250b696ac7545ed85a4_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,083 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111548f3cf6f0f245eca21c73ace45ec970_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111548f3cf6f0f245eca21c73ace45ec970_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,084 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115af66c9694ea4730822ccd68a02bd47a_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115af66c9694ea4730822ccd68a02bd47a_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,086 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115b6de3aa6dbc439fbdd94596c397ea85_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115b6de3aa6dbc439fbdd94596c397ea85_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,087 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111165a16b3ad2554398af2d341fa6d92ba3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111165a16b3ad2554398af2d341fa6d92ba3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,088 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111175d22b3a75324094a27078228fabc92f_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111175d22b3a75324094a27078228fabc92f_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,090 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111189d03073fa94438c858bdc5b3f00b70c_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111189d03073fa94438c858bdc5b3f00b70c_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,092 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111992d996e999c4068bc22c35d2fadd5af_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111992d996e999c4068bc22c35d2fadd5af_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,093 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119e7678264a764d5281dd796e2f4b5ea3_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119e7678264a764d5281dd796e2f4b5ea3_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,095 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ab704bfade144584a1335ab8775150c4_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ab704bfade144584a1335ab8775150c4_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,096 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b925560164be4cd79960e46313f6b65d_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b925560164be4cd79960e46313f6b65d_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,097 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b9b698c18e7940ebbc8250c6c827bf67_fe7e7af7c234f1775e0b775751ee14f9 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b9b698c18e7940ebbc8250c6c827bf67_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,099 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c7d0554a867243948bdc921b42e7a96f_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c7d0554a867243948bdc921b42e7a96f_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,100 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e92a64019b514c8d8b649dc18d81ab30_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e92a64019b514c8d8b649dc18d81ab30_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,101 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111fb01ce038a7e4872a2823a9657a99009_fe7e7af7c234f1775e0b775751ee14f9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111fb01ce038a7e4872a2823a9657a99009_fe7e7af7c234f1775e0b775751ee14f9 2024-11-11T12:42:35,102 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:42:35,105 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=65, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,108 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:42:35,111 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-11T12:42:35,112 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=65, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,113 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
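The records around this point belong to DeleteTableProcedure pid=65: PEWorker-2 archives the MOB store files of region fe7e7af7c234f1775e0b775751ee14f9 into archive/data/default/TestAcidGuarantees, and immediately below it deletes the region row and table state from hbase:meta before marking pid=65 SUCCESS. For orientation only, a minimal client-side sketch of the disable-and-delete call that kicks off such a procedure is given here; it targets the standard HBase 2.x Admin API, and the class name DropTestTable plus the bare-bones error handling are illustrative, not code from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class DropTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();      // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(tn)) {
        if (admin.isTableEnabled(tn)) {
          admin.disableTable(tn);                           // a table must be disabled before it can be deleted
        }
        admin.deleteTable(tn);                              // the master then runs a DeleteTableProcedure like pid=65 above
      }
    }
  }
}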
2024-11-11T12:42:35,113 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731328955113"}]},"ts":"9223372036854775807"} 2024-11-11T12:42:35,115 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:42:35,115 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fe7e7af7c234f1775e0b775751ee14f9, NAME => 'TestAcidGuarantees,,1731328928565.fe7e7af7c234f1775e0b775751ee14f9.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:42:35,115 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-11T12:42:35,115 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731328955115"}]},"ts":"9223372036854775807"} 2024-11-11T12:42:35,117 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:42:35,120 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=65, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,121 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 88 msec 2024-11-11T12:42:35,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=65 2024-11-11T12:42:35,135 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 65 completed 2024-11-11T12:42:35,150 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobMixedAtomicity Thread=242 (was 241) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:49454 [Waiting for operation #382] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1424648804_22 at /127.0.0.1:40766 [Waiting for operation #117] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:40782 [Waiting for operation #120] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1127917811_22 at /127.0.0.1:55010 [Waiting for operation #370] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1424648804_22 at /127.0.0.1:55002 [Waiting for operation #367] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x23ab8f3b-shared-pool-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=470 (was 462) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=851 (was 754) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=1799 (was 2451) 2024-11-11T12:42:35,162 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=242, OpenFileDescriptor=470, MaxFileDescriptor=1048576, SystemLoadAverage=851, ProcessCount=11, AvailableMemoryMB=1799 2024-11-11T12:42:35,164 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-11T12:42:35,165 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:42:35,166 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:42:35,168 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:42:35,168 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:35,168 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 66 2024-11-11T12:42:35,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:35,169 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:42:35,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:44919 is added to blk_1073742080_1256 (size=960) 2024-11-11T12:42:35,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:35,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:35,584 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:42:35,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742081_1257 (size=53) 2024-11-11T12:42:35,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 10680aa1d1802ca2e3b6db31ab7f417e, disabling compactions & flushes 2024-11-11T12:42:35,996 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. after waiting 0 ms 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
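The create request logged at 12:42:35,165 spells out the full descriptor of the new TestAcidGuarantees table: three column families A, B and C with a single version, ROW bloom filters and 64 KB blocks, plus the table-level attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A rough client-side equivalent using the HBase 2.x TableDescriptorBuilder API might look like the sketch below; the class name CreateAcidTable is illustrative, and options that the log shows at their defaults are omitted.

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateAcidTable {
  // Builds a descriptor roughly equivalent to the one printed by HMaster above and submits it.
  public static void create(Admin admin) throws IOException {
    TableDescriptorBuilder table =
        TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // table-level attribute from the log: BASIC in-memory compaction for the memstores
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
    for (String cf : new String[] {"A", "B", "C"}) {
      table.setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(cf))
              .setMaxVersions(1)                  // VERSIONS => '1'
              .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
              .setBlocksize(65536)                // BLOCKSIZE => '65536'
              .build());
    }
    admin.createTable(table.build());  // stored on the master as a CreateTableProcedure (pid=66 above)
  }
}

admin.createTable() blocks until the master has walked the CREATE_TABLE_* states seen in these records and the table is marked ENABLED in hbase:meta, which is why the test's HBaseAdmin$TableFuture reports "procId: 66 completed" only after the region assignment finishes.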
2024-11-11T12:42:35,996 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:35,996 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:35,997 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:42:35,998 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731328955997"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731328955997"}]},"ts":"1731328955997"} 2024-11-11T12:42:35,999 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T12:42:36,000 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:42:36,000 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328956000"}]},"ts":"1731328956000"} 2024-11-11T12:42:36,001 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:42:36,008 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, ASSIGN}] 2024-11-11T12:42:36,009 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, ASSIGN 2024-11-11T12:42:36,010 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=67, ppid=66, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:42:36,161 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=10680aa1d1802ca2e3b6db31ab7f417e, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:36,162 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=68, ppid=67, state=RUNNABLE; OpenRegionProcedure 10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:42:36,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:36,313 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:36,321 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(135): Open 
TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:36,321 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7285): Opening region: {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:42:36,322 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,322 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:42:36,322 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7327): checking encryption for 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,322 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(7330): checking classloading for 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,324 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,326 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:36,326 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10680aa1d1802ca2e3b6db31ab7f417e columnFamilyName A 2024-11-11T12:42:36,326 DEBUG [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:36,327 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(327): Store=10680aa1d1802ca2e3b6db31ab7f417e/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:36,327 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,328 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:36,328 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10680aa1d1802ca2e3b6db31ab7f417e columnFamilyName B 2024-11-11T12:42:36,328 DEBUG [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:36,329 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(327): Store=10680aa1d1802ca2e3b6db31ab7f417e/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:36,329 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,330 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:42:36,331 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 10680aa1d1802ca2e3b6db31ab7f417e columnFamilyName C 2024-11-11T12:42:36,331 DEBUG [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:42:36,331 INFO [StoreOpener-10680aa1d1802ca2e3b6db31ab7f417e-1 {}] regionserver.HStore(327): 
Store=10680aa1d1802ca2e3b6db31ab7f417e/C, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:42:36,331 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:36,332 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,334 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,343 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:42:36,350 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1085): writing seq id for 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:36,356 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:42:36,357 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1102): Opened 10680aa1d1802ca2e3b6db31ab7f417e; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67774864, jitterRate=0.009924173355102539}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:42:36,358 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegion(1001): Region open journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:36,363 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., pid=68, masterSystemTime=1731328956313 2024-11-11T12:42:36,365 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:36,365 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=68}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
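Each store of the newly opened region comes up as a CompactingMemStore with compactor=BASIC, which is what the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC' requests; the 2.00 MB in-memory flush threshold printed above is computed on the server side rather than set in the descriptor. For reference, the same policy can also be requested for a single column family through the 2.x client API. The helper below is an illustrative sketch, not part of the test.

import org.apache.hadoop.hbase.MemoryCompactionPolicy;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class BasicCompactionFamily {
  // Requests the BASIC compacting memstore for one family instead of setting it table-wide.
  public static ColumnFamilyDescriptor basicFamily(String name) {
    return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
        .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)  // per-family equivalent of the table attribute above
        .build();
  }
}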
2024-11-11T12:42:36,366 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=67 updating hbase:meta row=10680aa1d1802ca2e3b6db31ab7f417e, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:36,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=68, resume processing ppid=67 2024-11-11T12:42:36,369 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, ppid=67, state=SUCCESS; OpenRegionProcedure 10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 in 205 msec 2024-11-11T12:42:36,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=66 2024-11-11T12:42:36,373 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=66, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, ASSIGN in 361 msec 2024-11-11T12:42:36,374 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:42:36,374 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328956374"}]},"ts":"1731328956374"} 2024-11-11T12:42:36,375 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:42:36,378 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=66, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:42:36,380 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.2130 sec 2024-11-11T12:42:37,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=66 2024-11-11T12:42:37,273 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 66 completed 2024-11-11T12:42:37,275 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58341641 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17b6adc5 2024-11-11T12:42:37,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a569490, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,279 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,281 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34814, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,282 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:42:37,282 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:44552, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:42:37,284 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x44645c55 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@669e1999 2024-11-11T12:42:37,287 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6862e3ce, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,288 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x64ee0130 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@72aa9ee5 2024-11-11T12:42:37,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d296fed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,292 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x683b64c3 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ec09297 2024-11-11T12:42:37,295 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@8d0caa5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x07e55eb7 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4dfb20f6 2024-11-11T12:42:37,299 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43f04e0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,300 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x03a703d2 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@17cf7fc0 2024-11-11T12:42:37,302 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@560ec309, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,303 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78b04266 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5886c0f2 2024-11-11T12:42:37,307 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb04aeb, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,308 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x088aa519 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@66e575aa 2024-11-11T12:42:37,313 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a0e9c8f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,314 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5e998dd3 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@131ceb8f 2024-11-11T12:42:37,316 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d68f787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,317 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e4c79b8 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a78bf6d 2024-11-11T12:42:37,321 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e6bf6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,322 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d1403c3 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@328852db 2024-11-11T12:42:37,324 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1730a60f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:42:37,328 DEBUG [hconnection-0x320dea6a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,329 DEBUG [hconnection-0x660327a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,329 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34828, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,329 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:37,330 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34840, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,330 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees 2024-11-11T12:42:37,331 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:37,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:37,331 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=69, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:37,332 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=70, ppid=69, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:37,332 DEBUG [hconnection-0x62cdad86-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,333 DEBUG [hconnection-0x3ef4d1bb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,333 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,334 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34848, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,336 DEBUG [hconnection-0x539e15d1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,337 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34860, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,338 DEBUG [hconnection-0x4196845e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,338 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,341 DEBUG [hconnection-0x1bd56dac-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,341 DEBUG [hconnection-0x7ba186cb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,341 DEBUG [hconnection-0x67406ff5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,342 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,342 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34886, version=2.7.0-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,342 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34896, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,343 DEBUG [hconnection-0xa23afe1-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:42:37,344 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:34906, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:42:37,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:37,348 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:37,349 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:37,367 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329017365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,367 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329017365, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329017366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,368 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329017366, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,369 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329017368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5071fbe10b9f4c47b82b8d62a7ccb71e is 50, key is test_row_0/A:col10/1731328957348/Put/seqid=0 2024-11-11T12:42:37,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742082_1258 (size=12001) 2024-11-11T12:42:37,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:37,433 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5071fbe10b9f4c47b82b8d62a7ccb71e 2024-11-11T12:42:37,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a41c0bc9060a4226a20d1aeb51e58d58 is 50, key is test_row_0/B:col10/1731328957348/Put/seqid=0 2024-11-11T12:42:37,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329017468, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,472 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329017469, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329017473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,475 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329017473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329017476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,488 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,488 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-11T12:42:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,489 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
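Note (not part of the captured log): the repeated RegionTooBusyException entries above come from HRegion.checkResources rejecting puts while the memstore for region 10680aa1d1802ca2e3b6db31ab7f417e is over its blocking limit of 512.0 K. As a minimal client-side sketch only, the following shows how a writer against the same table, row, family, and qualifier seen in the log ("TestAcidGuarantees", "test_row_0", "A", "col10") could back off and retry on that exception. The retry count, sleep, and cell value are illustrative assumptions; depending on client retry settings the exception may also surface already wrapped after the client's own retries.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          // Family "A", qualifier "col10" match the store-file keys seen in the log.
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              // Region is blocking writes while its memstore drains; back off and retry.
              if (++attempts > 5) throw e;   // retry budget is an assumption
              Thread.sleep(200L * attempts);
            }
          }
        }
      }
    }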
2024-11-11T12:42:37,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742083_1259 (size=12001) 2024-11-11T12:42:37,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:37,641 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,642 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-11T12:42:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:37,642 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,642 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,643 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329017673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,674 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329017673, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,677 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329017676, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,678 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329017677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,682 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329017681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,795 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,796 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-11T12:42:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:37,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,796 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
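Note (not part of the captured log): the 512.0 K figure in the "Over memstore limit" messages is the per-region blocking size, i.e. hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier. The sketch below shows those two standard configuration keys; the 128 KB flush size and multiplier of 4 are assumed values chosen only because they reproduce the 512 K limit reported here, not the test's actual settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Hypothetical test-sized values: 128 KB * 4 = 512 K blocking limit.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 134217728L);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // Once a region's memstore exceeds flushSize * multiplier, new mutations are
        // rejected with RegionTooBusyException until the flush catches up.
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
      }
    }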
2024-11-11T12:42:37,797 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,927 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a41c0bc9060a4226a20d1aeb51e58d58 2024-11-11T12:42:37,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:37,949 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,952 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-11T12:42:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:37,952 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:37,952 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] handler.RSProcedureHandler(58): pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
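Note (not part of the captured log): the FlushTableProcedure (pid=69) and its FlushRegionProcedure subtask (pid=70) were started by the client-requested flush logged as "Client=jenkins//172.17.0.3 flush TestAcidGuarantees"; the recurring "Checking to see if procedure is done pid=69" lines are the client polling for completion, and the pid=70 failures occur because the region is already mid-flush. As a hedged illustration only, a client would typically trigger such a flush through the Admin API as below; connection details are assumptions.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Submits a table flush procedure on the master and waits while the
          // client polls the master for procedure completion.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }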
2024-11-11T12:42:37,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=70 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=70 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:37,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/29115167d1244af5898d5ef35aaf7016 is 50, key is test_row_0/C:col10/1731328957348/Put/seqid=0 2024-11-11T12:42:37,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742084_1260 (size=12001) 2024-11-11T12:42:37,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/29115167d1244af5898d5ef35aaf7016 2024-11-11T12:42:37,977 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329017977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,978 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329017978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,979 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329017978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,981 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329017981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,982 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5071fbe10b9f4c47b82b8d62a7ccb71e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e 2024-11-11T12:42:37,985 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:37,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329017985, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:37,988 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:42:37,989 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a41c0bc9060a4226a20d1aeb51e58d58 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58 2024-11-11T12:42:37,994 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:42:37,995 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/29115167d1244af5898d5ef35aaf7016 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016 2024-11-11T12:42:38,001 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:42:38,002 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 10680aa1d1802ca2e3b6db31ab7f417e in 653ms, sequenceid=15, compaction requested=false 2024-11-11T12:42:38,002 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:38,105 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,105 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 
{}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=70 2024-11-11T12:42:38,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:38,106 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:38,106 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:38,111 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/8c401c439c7b4de1a54fb48a5885cb82 is 50, key is test_row_0/A:col10/1731328957365/Put/seqid=0 2024-11-11T12:42:38,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742085_1261 (size=12001) 2024-11-11T12:42:38,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:38,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:38,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:38,492 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4533) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.batchMutate(HRegion.java:4464) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4953) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4947) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.mutate(HRegion.java:4943) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3233) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329018488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,494 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329018491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329018492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329018492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,495 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329018492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,519 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/8c401c439c7b4de1a54fb48a5885cb82 2024-11-11T12:42:38,530 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a4f8803627a6452bb82abb338b4c579a is 50, key is test_row_0/B:col10/1731328957365/Put/seqid=0 2024-11-11T12:42:38,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742086_1262 (size=12001) 2024-11-11T12:42:38,591 ERROR [LeaseRenewer:jenkins@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:42421,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:38,594 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329018593, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,595 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329018595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,604 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329018604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,609 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329018609, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,612 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329018612, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,796 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329018796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329018797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,808 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329018807, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,812 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329018811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:38,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329018814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:38,941 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a4f8803627a6452bb82abb338b4c579a 2024-11-11T12:42:38,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4e04b9b1c16448b9958cfa1011c7fc52 is 50, key is test_row_0/C:col10/1731328957365/Put/seqid=0 2024-11-11T12:42:38,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742087_1263 (size=12001) 2024-11-11T12:42:39,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329019098, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,106 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329019103, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,114 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329019112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329019118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,121 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329019118, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,376 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=38 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4e04b9b1c16448b9958cfa1011c7fc52 2024-11-11T12:42:39,388 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/8c401c439c7b4de1a54fb48a5885cb82 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82 2024-11-11T12:42:39,393 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82, entries=150, sequenceid=38, filesize=11.7 K 2024-11-11T12:42:39,399 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/a4f8803627a6452bb82abb338b4c579a as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a 2024-11-11T12:42:39,405 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a, entries=150, sequenceid=38, filesize=11.7 K 2024-11-11T12:42:39,406 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4e04b9b1c16448b9958cfa1011c7fc52 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52 2024-11-11T12:42:39,411 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52, entries=150, sequenceid=38, filesize=11.7 K 2024-11-11T12:42:39,412 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 10680aa1d1802ca2e3b6db31ab7f417e in 1306ms, sequenceid=38, compaction requested=false 2024-11-11T12:42:39,412 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:42421,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
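Most of the WARN records above follow one pattern: a client Mutate RPC is rejected with RegionTooBusyException because the region's memstore is over the 512.0 K blocking limit while a flush is still in flight, and each rejection is logged together with a retry deadline. Below is a minimal, illustrative retry-with-backoff sketch for that situation. It is not the HBase client API: `put_fn`, `RegionTooBusyError`, and the timing parameters are hypothetical stand-ins used only to show the back-off pattern such rejections imply.

```python
import random
import time


class RegionTooBusyError(Exception):
    """Stand-in for a transient 'region too busy' rejection, analogous to the
    RegionTooBusyException records in the log above (hypothetical class)."""


def put_with_backoff(put_fn, *, deadline_s=60.0, base_delay_s=0.1, max_delay_s=5.0):
    """Retry a single put-style callable until it succeeds or the time budget runs out.

    put_fn     -- zero-argument callable performing one mutation (hypothetical).
    deadline_s -- overall time budget, mirroring the per-call deadlines in the log.
    """
    start = time.monotonic()
    attempt = 0
    while True:
        try:
            return put_fn()
        except RegionTooBusyError:
            attempt += 1
            if time.monotonic() - start >= deadline_s:
                raise  # budget exhausted: surface the last rejection
            # Exponential backoff with jitter, capped at max_delay_s.
            delay = min(max_delay_s, base_delay_s * (2 ** (attempt - 1)))
            time.sleep(delay * random.uniform(0.5, 1.0))
```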
2024-11-11T12:42:39,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-11T12:42:39,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-11-11T12:42:39,417 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=70, resume processing ppid=69 2024-11-11T12:42:39,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0820 sec 2024-11-11T12:42:39,422 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=69, table=TestAcidGuarantees in 2.0910 sec 2024-11-11T12:42:39,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=69 2024-11-11T12:42:39,437 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 69 completed 2024-11-11T12:42:39,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:39,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees 2024-11-11T12:42:39,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-11T12:42:39,442 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:39,443 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=71, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:39,443 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:39,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-11T12:42:39,595 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,595 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=72 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:39,596 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:39,596 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:39,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:39,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4d43863061d74c57a6721080675db5e6 is 50, key is test_row_0/A:col10/1731328958490/Put/seqid=0 2024-11-11T12:42:39,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742088_1264 (size=12001) 2024-11-11T12:42:39,606 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4d43863061d74c57a6721080675db5e6 2024-11-11T12:42:39,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:39,610 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
as already flushing 2024-11-11T12:42:39,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/7caf14b30def4c4fb111331c4ca89e3d is 50, key is test_row_0/B:col10/1731328958490/Put/seqid=0 2024-11-11T12:42:39,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742089_1265 (size=12001) 2024-11-11T12:42:39,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329019627, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,630 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,630 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329019628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,631 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329019629, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,633 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,633 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329019630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329019630, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329019731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,732 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329019731, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,734 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329019733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,735 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329019734, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-11T12:42:39,746 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329019744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329019934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,936 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329019935, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,937 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329019936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,938 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329019937, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:39,960 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:39,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329019952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,024 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/7caf14b30def4c4fb111331c4ca89e3d 2024-11-11T12:42:40,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1aecc086061746df95aa7bccc31da5f7 is 50, key is test_row_0/C:col10/1731328958490/Put/seqid=0 2024-11-11T12:42:40,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742090_1266 (size=12001) 2024-11-11T12:42:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-11T12:42:40,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329020237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,241 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329020239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329020241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329020241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,265 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329020264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,443 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1aecc086061746df95aa7bccc31da5f7 2024-11-11T12:42:40,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4d43863061d74c57a6721080675db5e6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6 2024-11-11T12:42:40,477 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6, entries=150, sequenceid=51, filesize=11.7 K 2024-11-11T12:42:40,481 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:42:40,485 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/7caf14b30def4c4fb111331c4ca89e3d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d 2024-11-11T12:42:40,492 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d, entries=150, sequenceid=51, filesize=11.7 K 2024-11-11T12:42:40,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1aecc086061746df95aa7bccc31da5f7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7 2024-11-11T12:42:40,498 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7, entries=150, sequenceid=51, filesize=11.7 K 2024-11-11T12:42:40,499 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 10680aa1d1802ca2e3b6db31ab7f417e in 902ms, sequenceid=51, compaction requested=true 2024-11-11T12:42:40,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:40,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:40,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-11T12:42:40,499 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-11-11T12:42:40,501 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-11-11T12:42:40,502 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0570 sec 2024-11-11T12:42:40,503 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=71, table=TestAcidGuarantees in 1.0630 sec 2024-11-11T12:42:40,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-11-11T12:42:40,544 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 71 completed 2024-11-11T12:42:40,546 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:40,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees 2024-11-11T12:42:40,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-11T12:42:40,554 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:40,554 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=73, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:40,555 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=74, ppid=73, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:40,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-11T12:42:40,707 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,707 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=74 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:40,708 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:40,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:40,709 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:40,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/6f145d7bd20d430caab2e5c3d892cc87 is 50, key is test_row_0/A:col10/1731328959629/Put/seqid=0 2024-11-11T12:42:40,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742091_1267 (size=12001) 2024-11-11T12:42:40,723 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/6f145d7bd20d430caab2e5c3d892cc87 2024-11-11T12:42:40,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/6f4ae46e7d694160ac91a0e1e11d9f71 is 50, key is test_row_0/B:col10/1731328959629/Put/seqid=0 2024-11-11T12:42:40,744 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
as already flushing 2024-11-11T12:42:40,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:40,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742092_1268 (size=12001) 2024-11-11T12:42:40,747 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/6f4ae46e7d694160ac91a0e1e11d9f71 2024-11-11T12:42:40,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/360c5c2b917446b0b1b7f4ca19762e84 is 50, key is test_row_0/C:col10/1731328959629/Put/seqid=0 2024-11-11T12:42:40,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329020760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,765 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329020761, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,765 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329020763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,766 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329020763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329020770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742093_1269 (size=12001) 2024-11-11T12:42:40,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-11T12:42:40,872 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329020869, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329020872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,875 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329020872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:40,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:40,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329020873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,020 DEBUG [master/32e78532c8b1:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 69716d04bd60881dfce8676dd10b689d changed from -1.0 to 0.0, refreshing cache 2024-11-11T12:42:41,078 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329021077, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329021078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329021078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329021078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-11T12:42:41,183 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=74 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/360c5c2b917446b0b1b7f4ca19762e84 2024-11-11T12:42:41,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/6f145d7bd20d430caab2e5c3d892cc87 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87 2024-11-11T12:42:41,197 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87, entries=150, sequenceid=74, filesize=11.7 K 2024-11-11T12:42:41,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/6f4ae46e7d694160ac91a0e1e11d9f71 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71 2024-11-11T12:42:41,203 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71, entries=150, sequenceid=74, filesize=11.7 K 2024-11-11T12:42:41,206 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/360c5c2b917446b0b1b7f4ca19762e84 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84 2024-11-11T12:42:41,214 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84, entries=150, sequenceid=74, filesize=11.7 K 2024-11-11T12:42:41,215 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 10680aa1d1802ca2e3b6db31ab7f417e in 507ms, sequenceid=74, compaction requested=true 2024-11-11T12:42:41,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:41,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:41,216 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=74}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=74 2024-11-11T12:42:41,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=74 2024-11-11T12:42:41,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=74, resume processing ppid=73 2024-11-11T12:42:41,219 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, ppid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 664 msec 2024-11-11T12:42:41,221 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=73, table=TestAcidGuarantees in 674 msec 2024-11-11T12:42:41,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:42:41,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:41,388 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:41,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:41,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:41,389 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:41,392 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline 
suffix; before=1, new segment=null 2024-11-11T12:42:41,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:41,404 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/56f4d6f73f7d4532aba557561fc218a7 is 50, key is test_row_0/A:col10/1731328960761/Put/seqid=0 2024-11-11T12:42:41,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742094_1270 (size=12001) 2024-11-11T12:42:41,477 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329021474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,478 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329021474, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,481 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,481 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329021477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329021477, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,579 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329021579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329021580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329021582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329021582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=73 2024-11-11T12:42:41,657 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 73 completed 2024-11-11T12:42:41,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:41,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees 2024-11-11T12:42:41,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:41,666 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:41,667 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=75, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:41,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:41,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:41,778 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329021777, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,783 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329021782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,784 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329021783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,787 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329021785, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,788 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:41,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329021786, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,819 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:41,820 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:41,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:41,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:41,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/56f4d6f73f7d4532aba557561fc218a7 2024-11-11T12:42:41,839 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e15950b00bfc4254821faa5c162e709a is 50, key is test_row_0/B:col10/1731328960761/Put/seqid=0 2024-11-11T12:42:41,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742095_1271 (size=12001) 2024-11-11T12:42:41,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:41,976 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:41,977 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:41,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:41,977 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
as already flushing 2024-11-11T12:42:41,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:41,978 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:41,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:41,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,087 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329022084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,088 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329022086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,093 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329022091, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329022093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,136 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,139 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:42,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:42,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,140 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:42,140 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e15950b00bfc4254821faa5c162e709a 2024-11-11T12:42:42,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/cab46268443f4a25843d8e9bf84cb241 is 50, key is test_row_0/C:col10/1731328960761/Put/seqid=0 2024-11-11T12:42:42,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:42,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742096_1272 (size=12001) 2024-11-11T12:42:42,292 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,292 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:42,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:42,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,293 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,446 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:42,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:42,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,447 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,447 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,590 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329022588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329022594, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,599 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:42,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:42,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,600 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] handler.RSProcedureHandler(58): pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
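The RegionTooBusyException warnings interleaved above are server-side write backpressure: mutations against region 10680aa1d1802ca2e3b6db31ab7f417e are rejected while the region's memstore sits above its blocking limit and the requested flush has not yet completed. The following is a minimal sketch of that check, assuming the blocking limit is the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier; the class, method signature, and the 128 KB flush size used in the example are illustrative assumptions, not the actual HRegion implementation or this test's exact configuration.

    // Minimal sketch (not the real org.apache.hadoop.hbase.regionserver.HRegion code)
    // of the memstore backpressure check behind the "Over memstore limit" warnings.
    public final class MemStoreBackpressureSketch {

        /** Stand-in for org.apache.hadoop.hbase.RegionTooBusyException. */
        public static final class RegionTooBusy extends RuntimeException {
            RegionTooBusy(String msg) { super(msg); }
        }

        private final long blockingMemStoreSize;

        public MemStoreBackpressureSketch(long flushSizeBytes, int blockMultiplier) {
            // Blocking limit assumed to be flush size * hbase.hregion.memstore.block.multiplier.
            this.blockingMemStoreSize = flushSizeBytes * blockMultiplier;
        }

        /** Rejects a write while the region's memstore is over the blocking limit. */
        public void checkResources(long memStoreSizeBytes, String regionName, String server) {
            if (memStoreSizeBytes > blockingMemStoreSize) {
                // The RPC handler surfaces this to the client, which backs off and retries
                // until the in-progress flush frees memstore space.
                throw new RegionTooBusy("Over memstore limit=" + (blockingMemStoreSize / 1024)
                    + ".0 K, regionName=" + regionName + ", server=" + server);
            }
        }

        public static void main(String[] args) {
            // One combination that yields the 512 KB limit seen in this run:
            // a 128 KB flush size with a multiplier of 4 (illustrative values).
            MemStoreBackpressureSketch check = new MemStoreBackpressureSketch(128 * 1024, 4);
            try {
                check.checkResources(600 * 1024, "10680aa1d1802ca2e3b6db31ab7f417e",
                    "32e78532c8b1,44673,1731328897232");
            } catch (RegionTooBusy e) {
                System.out.println(e.getMessage()); // Over memstore limit=512.0 K, ...
            }
        }
    }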
2024-11-11T12:42:42,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=76 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=76 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:42,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329022600, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,602 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:42:42,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329022601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:42:42,687 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=89 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/cab46268443f4a25843d8e9bf84cb241
2024-11-11T12:42:42,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/56f4d6f73f7d4532aba557561fc218a7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7
2024-11-11T12:42:42,697 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7, entries=150, sequenceid=89, filesize=11.7 K
2024-11-11T12:42:42,698 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e15950b00bfc4254821faa5c162e709a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a
2024-11-11T12:42:42,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a, entries=150, sequenceid=89, filesize=11.7 K
2024-11-11T12:42:42,706 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/cab46268443f4a25843d8e9bf84cb241 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241
2024-11-11T12:42:42,711 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241, entries=150, sequenceid=89, filesize=11.7 K
2024-11-11T12:42:42,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for 10680aa1d1802ca2e3b6db31ab7f417e in 1324ms, sequenceid=89, compaction requested=true
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e:
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:42:42,712 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-11-11T12:42:42,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3
2024-11-11T12:42:42,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-11T12:42:42,717 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-11-11T12:42:42,717 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files)
2024-11-11T12:42:42,718 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.
2024-11-11T12:42:42,718 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=58.6 K
2024-11-11T12:42:42,719 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 60005 starting at candidate #0 after considering 6 permutations with 6 in ratio
2024-11-11T12:42:42,719 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files)
2024-11-11T12:42:42,719 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.
2024-11-11T12:42:42,719 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=58.6 K
2024-11-11T12:42:42,720 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a41c0bc9060a4226a20d1aeb51e58d58, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328957333
2024-11-11T12:42:42,720 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5071fbe10b9f4c47b82b8d62a7ccb71e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328957333
2024-11-11T12:42:42,720 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8c401c439c7b4de1a54fb48a5885cb82, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731328957359
2024-11-11T12:42:42,720 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a4f8803627a6452bb82abb338b4c579a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731328957359
2024-11-11T12:42:42,721 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4d43863061d74c57a6721080675db5e6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731328958485
2024-11-11T12:42:42,721 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7caf14b30def4c4fb111331c4ca89e3d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731328958485
2024-11-11T12:42:42,721 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f145d7bd20d430caab2e5c3d892cc87, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731328959626
2024-11-11T12:42:42,722 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f4ae46e7d694160ac91a0e1e11d9f71, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731328959626
2024-11-11T12:42:42,722 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 56f4d6f73f7d4532aba557561fc218a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE,
compression=NONE, seqNum=89, earliestPutTs=1731328960755 2024-11-11T12:42:42,722 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e15950b00bfc4254821faa5c162e709a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328960755 2024-11-11T12:42:42,739 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#225 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:42,740 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/7791cfe3b4484fa9aa913b04a4310cac is 50, key is test_row_0/A:col10/1731328960761/Put/seqid=0 2024-11-11T12:42:42,740 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#226 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:42,741 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/87791b0c8e894d51b997cb9e5138a7cf is 50, key is test_row_0/B:col10/1731328960761/Put/seqid=0 2024-11-11T12:42:42,752 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:42,752 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=76 2024-11-11T12:42:42,752 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:42,753 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:42,753 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:42,761 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4245c5822be948ee82a4d35519cd2e80 is 50, key is test_row_0/A:col10/1731328961475/Put/seqid=0 2024-11-11T12:42:42,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742097_1273 (size=12173) 2024-11-11T12:42:42,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742098_1274 (size=12173) 2024-11-11T12:42:42,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:42,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742099_1275 (size=12001) 2024-11-11T12:42:42,787 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4245c5822be948ee82a4d35519cd2e80 2024-11-11T12:42:42,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/bb3a19b080f040aaa2ecc949c10db180 is 50, key is test_row_0/B:col10/1731328961475/Put/seqid=0 2024-11-11T12:42:42,803 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742100_1276 (size=12001) 2024-11-11T12:42:42,804 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/bb3a19b080f040aaa2ecc949c10db180 2024-11-11T12:42:42,815 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/5728761b36b74851a06dc6bdcf3bb579 is 50, key is test_row_0/C:col10/1731328961475/Put/seqid=0 2024-11-11T12:42:42,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742101_1277 (size=12001) 2024-11-11T12:42:42,844 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=111 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/5728761b36b74851a06dc6bdcf3bb579 2024-11-11T12:42:42,864 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4245c5822be948ee82a4d35519cd2e80 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80 2024-11-11T12:42:42,871 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80, entries=150, sequenceid=111, filesize=11.7 K 2024-11-11T12:42:42,872 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/bb3a19b080f040aaa2ecc949c10db180 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180 2024-11-11T12:42:42,878 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180, entries=150, sequenceid=111, filesize=11.7 K 2024-11-11T12:42:42,879 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, 
pid=76}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/5728761b36b74851a06dc6bdcf3bb579 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579 2024-11-11T12:42:42,885 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579, entries=150, sequenceid=111, filesize=11.7 K 2024-11-11T12:42:42,886 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=0 B/0 for 10680aa1d1802ca2e3b6db31ab7f417e in 133ms, sequenceid=111, compaction requested=true 2024-11-11T12:42:42,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:42,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:42,886 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=76}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=76 2024-11-11T12:42:42,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=76 2024-11-11T12:42:42,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-11-11T12:42:42,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2210 sec 2024-11-11T12:42:42,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=75, table=TestAcidGuarantees in 1.2310 sec 2024-11-11T12:42:43,168 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/7791cfe3b4484fa9aa913b04a4310cac as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/7791cfe3b4484fa9aa913b04a4310cac 2024-11-11T12:42:43,173 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into 7791cfe3b4484fa9aa913b04a4310cac(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:43,173 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:43,173 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=11, startTime=1731328962712; duration=0sec 2024-11-11T12:42:43,173 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:43,173 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:43,173 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 6 store files, 0 compacting, 6 eligible, 16 blocking 2024-11-11T12:42:43,174 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/87791b0c8e894d51b997cb9e5138a7cf as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/87791b0c8e894d51b997cb9e5138a7cf 2024-11-11T12:42:43,176 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 6 files of size 72006 starting at candidate #0 after considering 10 permutations with 10 in ratio 2024-11-11T12:42:43,176 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:43,176 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:43,176 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=70.3 K 2024-11-11T12:42:43,178 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 29115167d1244af5898d5ef35aaf7016, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328957333 2024-11-11T12:42:43,179 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4e04b9b1c16448b9958cfa1011c7fc52, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=38, earliestPutTs=1731328957359 2024-11-11T12:42:43,179 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1aecc086061746df95aa7bccc31da5f7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731328958485 2024-11-11T12:42:43,180 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 360c5c2b917446b0b1b7f4ca19762e84, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=74, earliestPutTs=1731328959626 2024-11-11T12:42:43,180 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 87791b0c8e894d51b997cb9e5138a7cf(size=11.9 K), total size for store is 23.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:43,181 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting cab46268443f4a25843d8e9bf84cb241, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328960755 2024-11-11T12:42:43,181 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:43,181 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=11, startTime=1731328962712; duration=0sec 2024-11-11T12:42:43,181 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:43,181 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:43,181 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5728761b36b74851a06dc6bdcf3bb579, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1731328961472 2024-11-11T12:42:43,199 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#230 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:43,200 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/3e3d1b89abbd4a27af0049715f69bd82 is 50, key is test_row_0/C:col10/1731328961475/Put/seqid=0 2024-11-11T12:42:43,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742102_1278 (size=12207) 2024-11-11T12:42:43,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:43,611 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:42:43,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:43,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:43,612 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:43,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:43,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:43,613 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
2024-11-11T12:42:43,616 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/3e3d1b89abbd4a27af0049715f69bd82 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/3e3d1b89abbd4a27af0049715f69bd82 2024-11-11T12:42:43,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/493bf71e828b49568f45863daeb1ba9e is 50, key is test_row_0/A:col10/1731328963611/Put/seqid=0 2024-11-11T12:42:43,622 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 6 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into 3e3d1b89abbd4a27af0049715f69bd82(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:43,622 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:43,622 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=10, startTime=1731328962712; duration=0sec 2024-11-11T12:42:43,623 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:43,623 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:43,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742103_1279 (size=12001) 2024-11-11T12:42:43,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/493bf71e828b49568f45863daeb1ba9e 2024-11-11T12:42:43,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/72a52418574548efa9be0805e551761c is 50, key is test_row_0/B:col10/1731328963611/Put/seqid=0 2024-11-11T12:42:43,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742104_1280 (size=12001) 2024-11-11T12:42:43,638 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,638 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329023634, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329023635, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329023638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,641 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329023638, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329023739, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329023740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,743 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329023742, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,744 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329023743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=75 2024-11-11T12:42:43,771 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 75 completed 2024-11-11T12:42:43,773 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:43,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees 2024-11-11T12:42:43,774 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:43,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:43,775 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=77, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:43,775 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:43,784 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329023783, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,784 DEBUG [Thread-1183 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) 
at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:43,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:43,926 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,927 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-11T12:42:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 
{event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:43,927 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:43,927 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
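Annotation: the write failures above end in RegionTooBusyException ("Over memstore limit=512.0 K") thrown by HRegion.checkResources, while the flush callable fails with "Unable to complete flush" because the region reports it is already flushing. The blocking memstore size is the configured region flush size multiplied by a blocking multiplier, so the 512 K figure suggests this test runs with a deliberately tiny flush size. A minimal sketch of the two settings involved follows; the class name is mine and the values are illustrative (128 KB x 4 = 512 KB, matching the log), not necessarily what the test actually sets.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Region flush size: the memstore is flushed once it reaches this many bytes.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024); // 128 KB, illustrative
        // Blocking multiplier: writes are rejected with RegionTooBusyException once the
        // memstore grows past flush.size * block.multiplier (512 KB with the values above).
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
      }
    }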
2024-11-11T12:42:43,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:43,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329023943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329023943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,946 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329023945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:43,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:43,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329023945, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,038 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/72a52418574548efa9be0805e551761c 2024-11-11T12:42:44,057 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c is 50, key is test_row_0/C:col10/1731328963611/Put/seqid=0 2024-11-11T12:42:44,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742105_1281 (size=12001) 2024-11-11T12:42:44,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:44,079 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,080 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-11T12:42:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
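Annotation: the rejected Mutate calls logged by CallRunner are retried on the client side. In the HTable.put trace earlier in this log, RegionTooBusyException is unwrapped from RemoteWithExtrasException and handled by RpcRetryingCallerImpl, which backs off and retries until the retry budget or the operation deadline (the "deadline=" values above) is exhausted, and only then lets the exception reach the caller. A minimal client-side sketch, reusing the table, row, family, and qualifier names seen in this test; the class name is mine and the retry/pause/timeout values are illustrative rather than the test's actual settings.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class RetryingPutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.client.retries.number", 15);        // illustrative
        conf.setInt("hbase.client.pause", 100);                // ms between retries, illustrative
        conf.setInt("hbase.client.operation.timeout", 120000); // overall deadline, illustrative
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // RegionTooBusyException is retried internally with backoff; it only surfaces
          // to the caller once retries or the operation timeout are exhausted.
          table.put(put);
        }
      }
    }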
2024-11-11T12:42:44,080 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,080 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,232 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,233 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-11T12:42:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,233 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,247 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329024246, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329024247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,248 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329024247, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,249 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329024248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:44,386 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,388 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-11T12:42:44,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:44,389 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,389 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] handler.RSProcedureHandler(58): pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
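Annotation: the repeating pattern above (Executing remote procedure ... NOT flushing ... as already flushing ... Remote procedure failed, pid=78) is the master re-dispatching the flush procedure to the region server until the in-flight flush finishes; each attempt fails fast with "Unable to complete flush" instead of queuing a second flush. In this 2.7.0-SNAPSHOT build a client-requested flush appears to go through the same master-side procedure machinery, though that detail is version dependent. A minimal sketch of requesting such a flush from a client, using the table name from this log; the class name is mine.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequestExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; if a region is already
          // flushing, the server-side callable reports failure and the procedure retries.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }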
2024-11-11T12:42:44,390 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=78 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=78 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:44,470 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c 2024-11-11T12:42:44,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/493bf71e828b49568f45863daeb1ba9e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e 2024-11-11T12:42:44,485 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e, entries=150, sequenceid=125, filesize=11.7 K 2024-11-11T12:42:44,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/72a52418574548efa9be0805e551761c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c 2024-11-11T12:42:44,494 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c, entries=150, sequenceid=125, filesize=11.7 K 2024-11-11T12:42:44,499 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c 2024-11-11T12:42:44,511 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c, entries=150, sequenceid=125, filesize=11.7 K 2024-11-11T12:42:44,511 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=147.60 KB/151140 for 10680aa1d1802ca2e3b6db31ab7f417e in 900ms, sequenceid=125, compaction requested=true 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:44,512 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 
3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:44,512 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:44,512 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:44,514 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:44,514 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36175 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:44,514 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:44,514 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:44,514 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:44,515 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
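Annotation: the selection decisions above ("Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking", with ExploringCompactionPolicy choosing all 3 files for a minor compaction) are driven by store-file count thresholds. A minimal sketch of the commonly used knobs, with values consistent with what this log reports (3 files needed to trigger, 16 files before the store starts blocking); the class name is mine, and the key names are the usual 2.x ones, so verify them against the version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThresholdConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Minimum number of eligible store files before a minor compaction is scheduled.
        conf.setInt("hbase.hstore.compaction.min", 3);
        // Maximum number of files merged in a single minor compaction.
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Store file count at which flushes are delayed and updates may block
        // (the "16 blocking" figure in the selection lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
      }
    }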
2024-11-11T12:42:44,515 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/7791cfe3b4484fa9aa913b04a4310cac, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=35.3 K 2024-11-11T12:42:44,515 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/87791b0c8e894d51b997cb9e5138a7cf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=35.3 K 2024-11-11T12:42:44,515 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7791cfe3b4484fa9aa913b04a4310cac, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328960755 2024-11-11T12:42:44,515 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 87791b0c8e894d51b997cb9e5138a7cf, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=89, earliestPutTs=1731328960755 2024-11-11T12:42:44,516 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4245c5822be948ee82a4d35519cd2e80, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1731328961472 2024-11-11T12:42:44,516 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bb3a19b080f040aaa2ecc949c10db180, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1731328961472 2024-11-11T12:42:44,516 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 493bf71e828b49568f45863daeb1ba9e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731328963609 2024-11-11T12:42:44,516 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 72a52418574548efa9be0805e551761c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731328963609 2024-11-11T12:42:44,529 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#234 average throughput is 6.55 MB/second, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:44,530 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c3c42b6d12884e8f8f944516fa8e60fc is 50, key is test_row_0/A:col10/1731328963611/Put/seqid=0 2024-11-11T12:42:44,540 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#235 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:44,541 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/895d3ebeeaf4417f90c391195a0c697a is 50, key is test_row_0/B:col10/1731328963611/Put/seqid=0 2024-11-11T12:42:44,543 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,544 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=78 2024-11-11T12:42:44,544 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
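Annotation: the "total limit is 50.00 MB/second" figures come from the pressure-aware compaction throughput controller, which throttles compaction writes between a lower and an upper bound depending on flush pressure (here there was no pressure, so nothing slept). A hedged sketch of the knobs involved; the class name is mine, the key names are quoted from memory for the 2.x line and should be checked against the running version, and the values are illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionThroughputConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Throughput floor/ceiling used by the pressure-aware compaction throughput
        // controller (assumed key names; verify against the exact HBase version).
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
      }
    }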
2024-11-11T12:42:44,544 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:44,545 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:44,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/792e1d393b634feca8dfbb5d88521a69 is 50, key is test_row_0/A:col10/1731328963628/Put/seqid=0 2024-11-11T12:42:44,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742106_1282 (size=12275) 2024-11-11T12:42:44,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742107_1283 (size=12275) 2024-11-11T12:42:44,634 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c3c42b6d12884e8f8f944516fa8e60fc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c3c42b6d12884e8f8f944516fa8e60fc 2024-11-11T12:42:44,645 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/895d3ebeeaf4417f90c391195a0c697a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/895d3ebeeaf4417f90c391195a0c697a 2024-11-11T12:42:44,646 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742108_1284 (size=12151) 2024-11-11T12:42:44,648 INFO 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/792e1d393b634feca8dfbb5d88521a69 2024-11-11T12:42:44,652 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into c3c42b6d12884e8f8f944516fa8e60fc(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:44,652 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:44,652 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328964512; duration=0sec 2024-11-11T12:42:44,652 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:44,652 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:44,653 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:42:44,653 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 895d3ebeeaf4417f90c391195a0c697a(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
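Annotation: as the "Committing .../.tmp/A/... as .../A/..." and "Completed compaction of 3 (all) file(s) ... into c3c42b6d... (size=12.0 K)" lines show, both flushes and compactions write new HFiles under the region's .tmp directory and then commit them into the column-family directory, after which the replaced input files are later archived. A small sketch of inspecting the resulting store files directly on HDFS; the paths are copied from this log, the FileSystem calls are standard Hadoop APIs, and this is a debugging aid rather than anything the test itself does.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42421"), conf);
        // Column family 'A' of the region seen throughout this log.
        Path storeDir = new Path("/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/"
            + "data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A");
        for (FileStatus status : fs.listStatus(storeDir)) {
          System.out.println(status.getPath().getName() + " " + status.getLen() + " bytes");
        }
      }
    }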
2024-11-11T12:42:44,653 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:44,653 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328964512; duration=0sec 2024-11-11T12:42:44,653 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:44,653 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:44,654 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:42:44,654 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:42:44,654 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. because compaction request was cancelled 2024-11-11T12:42:44,654 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:44,660 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/2400f65b7b5c4fbab23fdf96010a6331 is 50, key is test_row_0/B:col10/1731328963628/Put/seqid=0 2024-11-11T12:42:44,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742109_1285 (size=12151) 2024-11-11T12:42:44,668 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/2400f65b7b5c4fbab23fdf96010a6331 2024-11-11T12:42:44,682 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/39dcdf8b31474ffab0bcbea87fe9b339 is 50, key is test_row_0/C:col10/1731328963628/Put/seqid=0 2024-11-11T12:42:44,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742110_1286 (size=12151) 2024-11-11T12:42:44,750 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:44,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:44,760 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329024758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329024759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,761 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329024759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,762 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329024759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,863 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329024863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329024863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329024864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:44,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329024864, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:44,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:45,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329025065, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329025066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,067 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329025066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329025067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,110 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=151 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/39dcdf8b31474ffab0bcbea87fe9b339 2024-11-11T12:42:45,116 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/792e1d393b634feca8dfbb5d88521a69 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69 2024-11-11T12:42:45,120 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69, entries=150, sequenceid=151, filesize=11.9 K 2024-11-11T12:42:45,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/2400f65b7b5c4fbab23fdf96010a6331 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331 2024-11-11T12:42:45,128 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331, entries=150, sequenceid=151, filesize=11.9 K 2024-11-11T12:42:45,129 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/39dcdf8b31474ffab0bcbea87fe9b339 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339 2024-11-11T12:42:45,134 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339, entries=150, sequenceid=151, filesize=11.9 K 2024-11-11T12:42:45,135 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 10680aa1d1802ca2e3b6db31ab7f417e in 591ms, sequenceid=151, compaction requested=true 2024-11-11T12:42:45,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:45,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:45,135 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=78}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=78 2024-11-11T12:42:45,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=78 2024-11-11T12:42:45,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-11-11T12:42:45,137 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3610 sec 2024-11-11T12:42:45,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=77, table=TestAcidGuarantees in 1.3650 sec 2024-11-11T12:42:45,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:45,370 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:45,371 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:45,376 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/e27ac802f5f54604922e6441d022963c is 50, key is test_row_0/A:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:45,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742111_1287 (size=9757) 2024-11-11T12:42:45,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329025418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,420 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329025418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,421 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329025419, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,421 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329025420, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,521 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329025521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,522 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329025521, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329025522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,524 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329025522, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,725 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329025724, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,729 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329025728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329025728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,729 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329025728, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:45,782 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/e27ac802f5f54604922e6441d022963c 2024-11-11T12:42:45,789 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5046dfad2c514a2ead5febc73e7c9fc7 is 50, key is test_row_0/B:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:45,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742112_1288 (size=9757) 2024-11-11T12:42:45,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=77 2024-11-11T12:42:45,882 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 77 completed 2024-11-11T12:42:45,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:45,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees 2024-11-11T12:42:45,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:45,886 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:45,886 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=79, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:45,887 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
2024-11-11T12:42:45,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:46,027 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329026026, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,031 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329026030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,032 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329026030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,032 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329026032, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,038 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,038 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-11T12:42:46,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:46,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,039 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,039 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,188 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:46,191 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,191 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-11T12:42:46,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:46,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,192 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:46,192 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,194 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5046dfad2c514a2ead5febc73e7c9fc7 2024-11-11T12:42:46,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/abef0f88bcc44447832300a29b52fafc is 50, key is test_row_0/C:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:46,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742113_1289 (size=9757) 2024-11-11T12:42:46,344 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,345 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-11T12:42:46,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:46,345 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,345 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:46,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:46,498 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,499 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-11T12:42:46,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:46,499 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,500 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] handler.RSProcedureHandler(58): pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=80 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=80 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:46,533 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329026531, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,536 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329026534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329026535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:46,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329026536, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,610 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=165 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/abef0f88bcc44447832300a29b52fafc 2024-11-11T12:42:46,616 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/e27ac802f5f54604922e6441d022963c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c 2024-11-11T12:42:46,621 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c, entries=100, sequenceid=165, filesize=9.5 K 2024-11-11T12:42:46,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5046dfad2c514a2ead5febc73e7c9fc7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7 2024-11-11T12:42:46,640 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7, entries=100, sequenceid=165, filesize=9.5 K 2024-11-11T12:42:46,641 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/abef0f88bcc44447832300a29b52fafc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc 2024-11-11T12:42:46,648 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc, entries=100, sequenceid=165, filesize=9.5 K 2024-11-11T12:42:46,649 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 10680aa1d1802ca2e3b6db31ab7f417e in 1279ms, sequenceid=165, compaction requested=true 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:46,649 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:46,649 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:46,649 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:46,650 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:46,651 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:46,651 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:46,651 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:46,651 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/895d3ebeeaf4417f90c391195a0c697a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=33.4 K 2024-11-11T12:42:46,651 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:46,651 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,651 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c3c42b6d12884e8f8f944516fa8e60fc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=33.4 K 2024-11-11T12:42:46,652 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 895d3ebeeaf4417f90c391195a0c697a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731328963609 2024-11-11T12:42:46,652 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c3c42b6d12884e8f8f944516fa8e60fc, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731328963609 2024-11-11T12:42:46,652 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:46,652 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 2400f65b7b5c4fbab23fdf96010a6331, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1731328963628 2024-11-11T12:42:46,652 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class 
org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=80 2024-11-11T12:42:46,652 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,653 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 792e1d393b634feca8dfbb5d88521a69, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1731328963628 2024-11-11T12:42:46,653 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:42:46,653 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 5046dfad2c514a2ead5febc73e7c9fc7, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328964758 2024-11-11T12:42:46,653 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:46,654 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:46,654 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e27ac802f5f54604922e6441d022963c, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328964758 2024-11-11T12:42:46,659 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/49e18ee0f9bd457bb14d988b77d43dfb is 50, key is test_row_0/A:col10/1731328965416/Put/seqid=0 2024-11-11T12:42:46,670 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#243 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:46,671 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f656baddf5274c6292e6c745128bf6ef is 50, key is test_row_0/B:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:46,673 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#244 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:46,673 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/50a8ac66b8414955b126b4f185069709 is 50, key is test_row_0/A:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:46,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742114_1290 (size=12151) 2024-11-11T12:42:46,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742116_1292 (size=12527) 2024-11-11T12:42:46,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742115_1291 (size=12527) 2024-11-11T12:42:46,695 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/50a8ac66b8414955b126b4f185069709 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/50a8ac66b8414955b126b4f185069709 2024-11-11T12:42:46,697 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f656baddf5274c6292e6c745128bf6ef as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f656baddf5274c6292e6c745128bf6ef 2024-11-11T12:42:46,704 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into 50a8ac66b8414955b126b4f185069709(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:46,704 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:46,705 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328966649; duration=0sec 2024-11-11T12:42:46,705 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:46,705 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:46,705 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:46,708 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 46116 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:46,708 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:46,708 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:46,709 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/3e3d1b89abbd4a27af0049715f69bd82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=45.0 K 2024-11-11T12:42:46,709 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e3d1b89abbd4a27af0049715f69bd82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=111, earliestPutTs=1731328961472 2024-11-11T12:42:46,709 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into f656baddf5274c6292e6c745128bf6ef(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:46,710 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:46,710 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328966649; duration=0sec 2024-11-11T12:42:46,710 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:46,710 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:46,710 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6e9b354e0d6c4d1ca8d7a9b47d4c451c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=125, earliestPutTs=1731328963609 2024-11-11T12:42:46,711 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 39dcdf8b31474ffab0bcbea87fe9b339, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=151, earliestPutTs=1731328963628 2024-11-11T12:42:46,712 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting abef0f88bcc44447832300a29b52fafc, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328964758 2024-11-11T12:42:46,730 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#245 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:46,730 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4e3d28808f5e4864b2d99ac278abcd01 is 50, key is test_row_0/C:col10/1731328965369/Put/seqid=0 2024-11-11T12:42:46,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742117_1293 (size=12493) 2024-11-11T12:42:46,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:47,076 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/49e18ee0f9bd457bb14d988b77d43dfb 2024-11-11T12:42:47,084 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e033fafff1dd4c7daa94a1cd42f3044f is 50, key is test_row_0/B:col10/1731328965416/Put/seqid=0 2024-11-11T12:42:47,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742118_1294 (size=12151) 2024-11-11T12:42:47,165 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4e3d28808f5e4864b2d99ac278abcd01 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e3d28808f5e4864b2d99ac278abcd01 2024-11-11T12:42:47,171 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into 4e3d28808f5e4864b2d99ac278abcd01(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:47,172 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:47,172 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=12, startTime=1731328966649; duration=0sec 2024-11-11T12:42:47,172 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:47,172 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:47,489 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e033fafff1dd4c7daa94a1cd42f3044f 2024-11-11T12:42:47,502 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/0f7495b21ee04640a2334d12eb622c82 is 50, key is test_row_0/C:col10/1731328965416/Put/seqid=0 2024-11-11T12:42:47,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742119_1295 (size=12151) 2024-11-11T12:42:47,512 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=189 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/0f7495b21ee04640a2334d12eb622c82 2024-11-11T12:42:47,521 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/49e18ee0f9bd457bb14d988b77d43dfb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb 2024-11-11T12:42:47,526 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb, entries=150, sequenceid=189, filesize=11.9 K 2024-11-11T12:42:47,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e033fafff1dd4c7daa94a1cd42f3044f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f 2024-11-11T12:42:47,534 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f, entries=150, sequenceid=189, filesize=11.9 K 2024-11-11T12:42:47,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/0f7495b21ee04640a2334d12eb622c82 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82 2024-11-11T12:42:47,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:47,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:47,550 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82, entries=150, sequenceid=189, filesize=11.9 K 2024-11-11T12:42:47,551 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=26.84 KB/27480 for 10680aa1d1802ca2e3b6db31ab7f417e in 898ms, sequenceid=189, compaction requested=false 2024-11-11T12:42:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
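This flush drains the region's memstore, while the RegionTooBusyException entries that follow show writers being rejected at the 512.0 K blocking limit. That blocking size is the memstore flush size times the block multiplier; the snippet below shows the configuration knobs involved, with values chosen only to line up with the 512 K figure in the log (the test's real settings are not shown here and may differ).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative settings only: a region blocks new writes once its memstore
// exceeds flush.size * block.multiplier, which is what produces the
// RegionTooBusyException ("Over memstore limit=...") entries in this log.
public class MemstoreLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // hypothetical 128 KB
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocks around 512 KB
  }
}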
2024-11-11T12:42:47,551 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-11T12:42:47,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=80 2024-11-11T12:42:47,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=80, resume processing ppid=79 2024-11-11T12:42:47,555 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, ppid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6660 sec 2024-11-11T12:42:47,557 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:42:47,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:47,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=79, table=TestAcidGuarantees in 1.6720 sec 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:47,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:47,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/d14e7d4a29514c1280af60965307b2ca is 50, key is test_row_0/A:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:47,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742120_1296 (size=9757) 2024-11-11T12:42:47,583 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329027580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,584 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329027582, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,585 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329027583, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,586 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329027584, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,685 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329027684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,686 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329027685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,689 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329027687, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,690 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329027688, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34882 deadline: 1731329027793, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,796 DEBUG [Thread-1183 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8166 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:47,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329027890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329027890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329027891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,892 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:47,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329027891, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:47,975 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/d14e7d4a29514c1280af60965307b2ca 2024-11-11T12:42:47,984 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/b08c3493bd0248e5b2db3ce64081d3a3 is 50, key is test_row_0/B:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:47,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742121_1297 (size=9757) 2024-11-11T12:42:47,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=79 2024-11-11T12:42:47,990 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 79 completed 2024-11-11T12:42:47,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:47,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees 2024-11-11T12:42:47,993 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:47,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-11T12:42:47,994 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=81, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:47,994 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 
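The procedure entries above (FLUSH_TABLE_PREPARE, FLUSH_TABLE_FLUSH_REGIONS, and the FlushRegionProcedure subprocedure, pid=82) are the master-side result of a client requesting a table flush, as this test does. A minimal client-side sketch of that request is below; the connection setup is illustrative.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers a FlushTableProcedure on the master, which fans out
      // FlushRegionProcedure subprocedures to the region servers.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}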
2024-11-11T12:42:48,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-11T12:42:48,146 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,146 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:48,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,147 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,147 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329028192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329028194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329028194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,196 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329028194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-11T12:42:48,299 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,299 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:48,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,300 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:48,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,390 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/b08c3493bd0248e5b2db3ce64081d3a3 2024-11-11T12:42:48,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/e752aa6e2bf7417fbd6edd790115fd05 is 50, key is test_row_0/C:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:48,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742122_1298 (size=9757) 2024-11-11T12:42:48,452 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,453 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:48,453 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,453 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:48,454 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-11T12:42:48,611 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,612 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:48,612 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,612 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329028697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,699 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329028697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329028700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,701 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:48,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329028700, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,767 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,767 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:48,767 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,768 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] handler.RSProcedureHandler(58): pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,768 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=82 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=82 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:48,804 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=205 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/e752aa6e2bf7417fbd6edd790115fd05 2024-11-11T12:42:48,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/d14e7d4a29514c1280af60965307b2ca as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca 2024-11-11T12:42:48,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca, entries=100, sequenceid=205, filesize=9.5 K 2024-11-11T12:42:48,828 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/b08c3493bd0248e5b2db3ce64081d3a3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3 2024-11-11T12:42:48,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3, entries=100, sequenceid=205, filesize=9.5 K 2024-11-11T12:42:48,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/e752aa6e2bf7417fbd6edd790115fd05 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05 2024-11-11T12:42:48,841 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05, entries=100, sequenceid=205, filesize=9.5 K 2024-11-11T12:42:48,842 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush 
of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for 10680aa1d1802ca2e3b6db31ab7f417e in 1286ms, sequenceid=205, compaction requested=true 2024-11-11T12:42:48,842 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:48,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:48,842 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:48,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:48,843 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:48,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:48,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:48,843 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:48,843 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:48,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34435 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:48,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:48,844 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:48,844 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f656baddf5274c6292e6c745128bf6ef, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=33.6 K 2024-11-11T12:42:48,844 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34435 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:48,844 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:48,844 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:48,844 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/50a8ac66b8414955b126b4f185069709, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=33.6 K 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 50a8ac66b8414955b126b4f185069709, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328963637 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f656baddf5274c6292e6c745128bf6ef, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328963637 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e033fafff1dd4c7daa94a1cd42f3044f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731328965416 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 49e18ee0f9bd457bb14d988b77d43dfb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731328965416 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.Compactor(224): Compacting b08c3493bd0248e5b2db3ce64081d3a3, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328967555 2024-11-11T12:42:48,845 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d14e7d4a29514c1280af60965307b2ca, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328967555 2024-11-11T12:42:48,854 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#251 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:48,854 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#252 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:48,854 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4c4c930b255949d5a86f2961cd67b7d0 is 50, key is test_row_0/A:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:48,854 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/7bb6520220a64255ad6935f7936c764c is 50, key is test_row_0/B:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:48,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742123_1299 (size=12629) 2024-11-11T12:42:48,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742124_1300 (size=12629) 2024-11-11T12:42:48,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:48,924 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=82 2024-11-11T12:42:48,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:48,924 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:42:48,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:48,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:48,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:48,925 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:48,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:48,926 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:48,934 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5dee5c4a608e4971bca4c2bc26062fc2 is 50, key is test_row_0/A:col10/1731328967581/Put/seqid=0 2024-11-11T12:42:48,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742125_1301 (size=12151) 2024-11-11T12:42:49,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-11-11T12:42:49,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/7bb6520220a64255ad6935f7936c764c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7bb6520220a64255ad6935f7936c764c 2024-11-11T12:42:49,286 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4c4c930b255949d5a86f2961cd67b7d0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4c4c930b255949d5a86f2961cd67b7d0 2024-11-11T12:42:49,300 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 
7bb6520220a64255ad6935f7936c764c(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:49,300 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:49,300 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328968842; duration=0sec 2024-11-11T12:42:49,300 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:49,300 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:49,300 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:49,302 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into 4c4c930b255949d5a86f2961cd67b7d0(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:49,302 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:49,302 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328968842; duration=0sec 2024-11-11T12:42:49,302 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:49,303 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:49,305 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34401 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:49,305 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:49,305 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:49,305 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e3d28808f5e4864b2d99ac278abcd01, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=33.6 K 2024-11-11T12:42:49,305 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4e3d28808f5e4864b2d99ac278abcd01, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=165, earliestPutTs=1731328963637 2024-11-11T12:42:49,306 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0f7495b21ee04640a2334d12eb622c82, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=189, earliestPutTs=1731328965416 2024-11-11T12:42:49,306 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e752aa6e2bf7417fbd6edd790115fd05, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328967555 2024-11-11T12:42:49,319 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#254 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:49,320 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/eddb45bd6da447068e33aeb44e48a1a6 is 50, key is test_row_0/C:col10/1731328967556/Put/seqid=0 2024-11-11T12:42:49,339 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5dee5c4a608e4971bca4c2bc26062fc2 2024-11-11T12:42:49,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742126_1302 (size=12595) 2024-11-11T12:42:49,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e4a0fb5ab4064589bd3d357a585ff8e6 is 50, key is test_row_0/B:col10/1731328967581/Put/seqid=0 2024-11-11T12:42:49,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742127_1303 (size=12151) 2024-11-11T12:42:49,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:49,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:49,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329029711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,713 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329029711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329029711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,714 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329029712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,751 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/eddb45bd6da447068e33aeb44e48a1a6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eddb45bd6da447068e33aeb44e48a1a6 2024-11-11T12:42:49,757 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into eddb45bd6da447068e33aeb44e48a1a6(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:49,757 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:49,757 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=13, startTime=1731328968843; duration=0sec 2024-11-11T12:42:49,757 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:49,757 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:49,759 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e4a0fb5ab4064589bd3d357a585ff8e6 2024-11-11T12:42:49,772 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4c32921daee942e490814a5174a41d6b is 50, key is test_row_0/C:col10/1731328967581/Put/seqid=0 2024-11-11T12:42:49,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742128_1304 (size=12151) 2024-11-11T12:42:49,783 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4c32921daee942e490814a5174a41d6b 2024-11-11T12:42:49,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5dee5c4a608e4971bca4c2bc26062fc2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2 2024-11-11T12:42:49,804 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2, entries=150, sequenceid=229, filesize=11.9 K 2024-11-11T12:42:49,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e4a0fb5ab4064589bd3d357a585ff8e6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6 2024-11-11T12:42:49,811 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6, entries=150, sequenceid=229, filesize=11.9 K 2024-11-11T12:42:49,813 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/4c32921daee942e490814a5174a41d6b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b 2024-11-11T12:42:49,816 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329029814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,818 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329029818, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,821 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:49,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329029820, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:49,821 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b, entries=150, sequenceid=229, filesize=11.9 K 2024-11-11T12:42:49,824 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 10680aa1d1802ca2e3b6db31ab7f417e in 900ms, sequenceid=229, compaction requested=false 2024-11-11T12:42:49,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:49,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:49,824 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=82}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=82 2024-11-11T12:42:49,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=82 2024-11-11T12:42:49,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=82, resume processing ppid=81 2024-11-11T12:42:49,828 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.8310 sec 2024-11-11T12:42:49,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=81, table=TestAcidGuarantees in 1.8370 sec 2024-11-11T12:42:50,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:50,022 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:42:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:50,023 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:50,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:50,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:50,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:50,024 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:50,031 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/3831d8e204904360adf57aaf7fea5798 is 50, key is test_row_0/A:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:50,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742129_1305 (size=16931) 2024-11-11T12:42:50,063 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/3831d8e204904360adf57aaf7fea5798 2024-11-11T12:42:50,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/332e9258b0144dc7871084d9217111d3 is 50, key is test_row_0/B:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:50,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 
2024-11-11T12:42:50,111 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 81 completed 2024-11-11T12:42:50,113 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:50,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees 2024-11-11T12:42:50,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:50,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742130_1306 (size=12151) 2024-11-11T12:42:50,115 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:50,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,116 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=83, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:50,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329030113, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:50,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329030116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,119 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329030117, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:50,218 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329030217, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,225 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329030224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,232 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329030232, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,272 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,273 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:50,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:50,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,273 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,273 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:50,425 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329030424, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,429 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,430 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,432 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,432 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:50,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329030434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329030444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,521 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/332e9258b0144dc7871084d9217111d3 2024-11-11T12:42:50,569 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1135219c5d3544d5a4b88ac82a4006f3 is 50, key is test_row_0/C:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:50,587 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:50,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:50,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742131_1307 (size=12151) 2024-11-11T12:42:50,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:50,729 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329030727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329030736, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,754 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:50,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329030753, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,759 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,760 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,760 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,760 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,912 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:50,912 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:50,912 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:50,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:50,913 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] handler.RSProcedureHandler(58): pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,913 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=84 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:50,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=84 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:51,037 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=246 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1135219c5d3544d5a4b88ac82a4006f3 2024-11-11T12:42:51,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/3831d8e204904360adf57aaf7fea5798 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798 2024-11-11T12:42:51,046 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798, entries=250, sequenceid=246, filesize=16.5 K 2024-11-11T12:42:51,047 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/332e9258b0144dc7871084d9217111d3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3 2024-11-11T12:42:51,051 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3, entries=150, sequenceid=246, filesize=11.9 K 2024-11-11T12:42:51,052 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1135219c5d3544d5a4b88ac82a4006f3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3 2024-11-11T12:42:51,055 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3, entries=150, sequenceid=246, filesize=11.9 K 2024-11-11T12:42:51,056 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 10680aa1d1802ca2e3b6db31ab7f417e in 1033ms, sequenceid=246, compaction requested=true 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:51,056 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:51,056 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:51,056 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 41711 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36931 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:51,058 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:51,058 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:51,058 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4c4c930b255949d5a86f2961cd67b7d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=40.7 K 2024-11-11T12:42:51,058 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7bb6520220a64255ad6935f7936c764c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.1 K 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bb6520220a64255ad6935f7936c764c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328965419 2024-11-11T12:42:51,058 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c4c930b255949d5a86f2961cd67b7d0, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328965419 2024-11-11T12:42:51,059 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dee5c4a608e4971bca4c2bc26062fc2, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731328967575 2024-11-11T12:42:51,059 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e4a0fb5ab4064589bd3d357a585ff8e6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731328967575 2024-11-11T12:42:51,059 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3831d8e204904360adf57aaf7fea5798, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:51,059 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 332e9258b0144dc7871084d9217111d3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:51,065 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,065 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=84 2024-11-11T12:42:51,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:51,065 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:51,066 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,067 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#260 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:51,067 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e838ce0cdec14d38b73d57cdfebf47c6 is 50, key is test_row_0/B:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:51,069 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#261 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:51,069 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/dadd7a6ae4e24a85aee9bc1f137f3bd5 is 50, key is test_row_0/A:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:51,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2ca2e412c95041b49c753b235316136e is 50, key is test_row_0/A:col10/1731328970098/Put/seqid=0 2024-11-11T12:42:51,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742133_1309 (size=12731) 2024-11-11T12:42:51,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742134_1310 (size=12301) 2024-11-11T12:42:51,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742132_1308 (size=12731) 2024-11-11T12:42:51,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:51,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:51,232 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:51,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329031251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,255 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329031253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329031256, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,369 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329031363, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,359 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329031356, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,485 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2ca2e412c95041b49c753b235316136e 2024-11-11T12:42:51,511 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/dadd7a6ae4e24a85aee9bc1f137f3bd5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/dadd7a6ae4e24a85aee9bc1f137f3bd5 2024-11-11T12:42:51,515 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/e838ce0cdec14d38b73d57cdfebf47c6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e838ce0cdec14d38b73d57cdfebf47c6 2024-11-11T12:42:51,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/8929b6d000dc4e9296d657ce1e1103e1 is 50, key is 
test_row_0/B:col10/1731328970098/Put/seqid=0 2024-11-11T12:42:51,523 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into dadd7a6ae4e24a85aee9bc1f137f3bd5(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:51,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:51,523 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328971056; duration=0sec 2024-11-11T12:42:51,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:51,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:51,523 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:51,526 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:51,526 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:51,526 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:51,526 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eddb45bd6da447068e33aeb44e48a1a6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.0 K 2024-11-11T12:42:51,527 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting eddb45bd6da447068e33aeb44e48a1a6, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=205, earliestPutTs=1731328965419 2024-11-11T12:42:51,527 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4c32921daee942e490814a5174a41d6b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731328967575 2024-11-11T12:42:51,528 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1135219c5d3544d5a4b88ac82a4006f3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:51,529 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into e838ce0cdec14d38b73d57cdfebf47c6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:51,529 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:51,529 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328971056; duration=0sec 2024-11-11T12:42:51,529 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:51,529 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:51,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742135_1311 (size=12301) 2024-11-11T12:42:51,546 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#264 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:51,546 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/8929b6d000dc4e9296d657ce1e1103e1 2024-11-11T12:42:51,547 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/e0149f21cf32448391208d6c37046aeb is 50, key is test_row_0/C:col10/1731328969710/Put/seqid=0 2024-11-11T12:42:51,579 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329031575, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329031577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/f8376c2e142c41508a334f990a539203 is 50, key is test_row_0/C:col10/1731328970098/Put/seqid=0 2024-11-11T12:42:51,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742136_1312 (size=12697) 2024-11-11T12:42:51,596 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/e0149f21cf32448391208d6c37046aeb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e0149f21cf32448391208d6c37046aeb 2024-11-11T12:42:51,608 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into e0149f21cf32448391208d6c37046aeb(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:51,608 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:51,608 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=13, startTime=1731328971056; duration=0sec 2024-11-11T12:42:51,609 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:51,609 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:51,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742137_1313 (size=12301) 2024-11-11T12:42:51,626 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=269 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/f8376c2e142c41508a334f990a539203 2024-11-11T12:42:51,638 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2ca2e412c95041b49c753b235316136e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e 2024-11-11T12:42:51,645 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e, entries=150, sequenceid=269, filesize=12.0 K 2024-11-11T12:42:51,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/8929b6d000dc4e9296d657ce1e1103e1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1 2024-11-11T12:42:51,655 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1, entries=150, sequenceid=269, filesize=12.0 K 2024-11-11T12:42:51,657 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/f8376c2e142c41508a334f990a539203 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203 2024-11-11T12:42:51,664 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203, entries=150, sequenceid=269, filesize=12.0 K 2024-11-11T12:42:51,665 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 10680aa1d1802ca2e3b6db31ab7f417e in 600ms, sequenceid=269, compaction requested=false 2024-11-11T12:42:51,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:51,665 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:51,666 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=84}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=84 2024-11-11T12:42:51,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=84 2024-11-11T12:42:51,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=83 2024-11-11T12:42:51,669 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5510 sec 2024-11-11T12:42:51,671 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=83, table=TestAcidGuarantees in 1.5570 sec 2024-11-11T12:42:51,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:51,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:51,744 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:51,752 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/432b277ece114ff69fc3bf25acccd08c is 50, key is test_row_0/A:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:51,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742138_1314 (size=12301) 2024-11-11T12:42:51,783 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/432b277ece114ff69fc3bf25acccd08c 2024-11-11T12:42:51,798 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/c412152e5d9d4b3dbb061411dc50e37f is 50, key is test_row_0/B:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:51,822 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742139_1315 (size=12301) 2024-11-11T12:42:51,856 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329031854, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,894 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329031892, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,901 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329031900, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:51,963 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:51,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329031960, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,176 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,176 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329032175, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=83 2024-11-11T12:42:52,220 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 83 completed 2024-11-11T12:42:52,228 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:52,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/c412152e5d9d4b3dbb061411dc50e37f 2024-11-11T12:42:52,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees 2024-11-11T12:42:52,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:52,249 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:52,256 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=85, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 
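[annotation] The FlushTableProcedure stored as pid=85 above was started in response to a client flush request ("Client=jenkins//172.17.0.3 flush TestAcidGuarantees"). As a hedged illustration of how such a flush can be requested, the sketch below uses the Admin API; the table name matches the log, everything else is an assumption about how the test harness drives it.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the master to flush every region of the table; in this log that
            // request shows up as a FlushTableProcedure (pid=85) with one
            // FlushRegionProcedure subprocedure per region (pid=86).
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}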
2024-11-11T12:42:52,256 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=86, ppid=85, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:52,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1d96e991ffa41d2a649d6839818b820 is 50, key is test_row_0/C:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:52,280 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329032278, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742140_1316 (size=12301) 2024-11-11T12:42:52,320 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=287 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1d96e991ffa41d2a649d6839818b820 2024-11-11T12:42:52,329 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/432b277ece114ff69fc3bf25acccd08c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c 2024-11-11T12:42:52,339 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c, entries=150, sequenceid=287, filesize=12.0 K 2024-11-11T12:42:52,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/c412152e5d9d4b3dbb061411dc50e37f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f 2024-11-11T12:42:52,345 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f, entries=150, sequenceid=287, filesize=12.0 K 2024-11-11T12:42:52,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1d96e991ffa41d2a649d6839818b820 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820 2024-11-11T12:42:52,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:52,352 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820, entries=150, sequenceid=287, filesize=12.0 K 2024-11-11T12:42:52,353 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for 10680aa1d1802ca2e3b6db31ab7f417e in 613ms, sequenceid=287, compaction requested=true 2024-11-11T12:42:52,353 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:52,354 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:52,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:52,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:52,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:52,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:52,354 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:52,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:42:52,354 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:52,355 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:52,355 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:52,355 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,355 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/dadd7a6ae4e24a85aee9bc1f137f3bd5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.5 K 2024-11-11T12:42:52,356 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting dadd7a6ae4e24a85aee9bc1f137f3bd5, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:52,356 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37333 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:52,356 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:52,356 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:52,356 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e838ce0cdec14d38b73d57cdfebf47c6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.5 K 2024-11-11T12:42:52,357 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2ca2e412c95041b49c753b235316136e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1731328970098 2024-11-11T12:42:52,357 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e838ce0cdec14d38b73d57cdfebf47c6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:52,357 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 432b277ece114ff69fc3bf25acccd08c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:52,358 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8929b6d000dc4e9296d657ce1e1103e1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1731328970098 2024-11-11T12:42:52,358 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c412152e5d9d4b3dbb061411dc50e37f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:52,367 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#269 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:52,368 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/28db06d00c2b4545b14622eed42ae298 is 50, key is test_row_0/B:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:52,375 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#270 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:52,376 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/a42d77ae6c0c4698b1f4fae00674b391 is 50, key is test_row_0/A:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:52,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:52,400 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:42:52,400 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:52,401 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:52,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742142_1318 (size=12983) 2024-11-11T12:42:52,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742141_1317 (size=12983) 2024-11-11T12:42:52,416 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,419 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/f621ae6b0a85419ab24feb57a7c61585 is 50, key is test_row_0/A:col10/1731328972398/Put/seqid=0 2024-11-11T12:42:52,420 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:52,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:52,420 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:52,420 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,430 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329032427, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,435 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329032431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,436 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/a42d77ae6c0c4698b1f4fae00674b391 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/a42d77ae6c0c4698b1f4fae00674b391 2024-11-11T12:42:52,444 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into a42d77ae6c0c4698b1f4fae00674b391(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
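[annotation] The "Selecting compaction from 3 store files, 0 compacting, 3 eligible" and "Exploring compaction algorithm has selected 3 files" lines above come from the default minor-compaction selection. The small sketch below only reads the two settings that bound how many store files a minor compaction may pick; the key names and default values are standard HBase configuration as far as I know, but the values actually used by this test run are not visible in this excerpt.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Prints the bounds used when selecting store files for a minor compaction.
public class CompactionSelectionConfig {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        int minFiles = conf.getInt("hbase.hstore.compaction.min", 3);   // fewest files worth compacting
        int maxFiles = conf.getInt("hbase.hstore.compaction.max", 10);  // cap per compaction
        System.out.println("minor compaction picks between " + minFiles + " and " + maxFiles + " files");
    }
}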
2024-11-11T12:42:52,444 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:52,444 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328972353; duration=0sec 2024-11-11T12:42:52,444 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:52,444 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:52,445 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:52,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37299 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:52,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:52,449 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,449 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e0149f21cf32448391208d6c37046aeb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.4 K 2024-11-11T12:42:52,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e0149f21cf32448391208d6c37046aeb, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=246, earliestPutTs=1731328969705 2024-11-11T12:42:52,450 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f8376c2e142c41508a334f990a539203, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=269, earliestPutTs=1731328970098 2024-11-11T12:42:52,450 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b1d96e991ffa41d2a649d6839818b820, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:52,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44919 is added to blk_1073742143_1319 (size=12301) 2024-11-11T12:42:52,480 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#272 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:52,480 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/d9567cd402d149f78f035dc9bf3c0657 is 50, key is test_row_0/C:col10/1731328971242/Put/seqid=0 2024-11-11T12:42:52,485 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329032481, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742144_1320 (size=12949) 2024-11-11T12:42:52,534 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329032532, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,538 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329032537, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:52,580 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,581 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:52,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:52,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,581 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
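[annotation] The pid=86 failures above are expected rather than fatal: the FlushRegionCallable refuses to start a second flush while MemStoreFlusher.0 is still writing the sequenceid=287 files ("NOT flushing ... as already flushing"), reports an IOException, and the master re-dispatches the procedure until the in-flight flush completes. The standalone Java sketch below mirrors only that observable "skip if already flushing" behaviour; it is a simplified illustration, not the actual HRegion or FlushRegionCallable code.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

// Simplified stand-in for a region that allows only one flush at a time.
// A second flush request while one is in progress fails fast, leaving the
// caller (here, the master's procedure framework) to retry it later.
public class SingleFlushGuard {
    private final AtomicBoolean flushing = new AtomicBoolean(false);

    public void flush(Runnable writeStoreFiles) throws IOException {
        if (!flushing.compareAndSet(false, true)) {
            // Corresponds to "NOT flushing ... as already flushing" followed by
            // "Unable to complete flush" in the log: report failure, let the caller retry.
            throw new IOException("Unable to complete flush: already flushing");
        }
        try {
            writeStoreFiles.run(); // write the .tmp files and commit them
        } finally {
            flushing.set(false);
        }
    }

    public static void main(String[] args) throws Exception {
        SingleFlushGuard guard = new SingleFlushGuard();
        guard.flush(() -> System.out.println("flush #1 completed"));
        guard.flush(() -> System.out.println("flush #2 completed")); // succeeds because #1 already finished
    }
}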
2024-11-11T12:42:52,581 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,735 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,740 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,741 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,741 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,741 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329032740, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,742 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329032741, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,836 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/28db06d00c2b4545b14622eed42ae298 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/28db06d00c2b4545b14622eed42ae298 2024-11-11T12:42:52,852 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 28db06d00c2b4545b14622eed42ae298(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:52,852 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:52,852 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328972354; duration=0sec 2024-11-11T12:42:52,853 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:52,853 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:52,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:52,870 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/f621ae6b0a85419ab24feb57a7c61585 2024-11-11T12:42:52,884 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/665b68d3ad764299841b9f56004d2563 is 50, key is test_row_0/B:col10/1731328972398/Put/seqid=0 2024-11-11T12:42:52,894 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:52,895 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:52,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:52,895 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:52,895 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:52,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742145_1321 (size=12301) 2024-11-11T12:42:52,914 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/665b68d3ad764299841b9f56004d2563 2024-11-11T12:42:52,928 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/d3c9b3082e1d4ce8b07734d22dd0b409 is 50, key is test_row_0/C:col10/1731328972398/Put/seqid=0 2024-11-11T12:42:52,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742146_1322 (size=12301) 2024-11-11T12:42:52,950 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/d9567cd402d149f78f035dc9bf3c0657 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d9567cd402d149f78f035dc9bf3c0657 2024-11-11T12:42:52,957 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into d9567cd402d149f78f035dc9bf3c0657(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:52,957 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:52,957 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=13, startTime=1731328972354; duration=0sec 2024-11-11T12:42:52,957 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:52,957 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:52,992 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:52,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329032990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,048 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,048 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:53,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:53,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:53,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:53,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:53,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:53,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:53,051 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329033046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,052 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329033046, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,202 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,207 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:53,207 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:53,207 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:53,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:53,352 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=309 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/d3c9b3082e1d4ce8b07734d22dd0b409 2024-11-11T12:42:53,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:53,359 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,360 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:53,360 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] handler.RSProcedureHandler(58): pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:53,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=86 java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:42:53,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=86 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:42:53,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/f621ae6b0a85419ab24feb57a7c61585 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585 2024-11-11T12:42:53,366 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585, entries=150, sequenceid=309, filesize=12.0 K 2024-11-11T12:42:53,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/665b68d3ad764299841b9f56004d2563 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563 2024-11-11T12:42:53,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563, entries=150, sequenceid=309, filesize=12.0 K 2024-11-11T12:42:53,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/d3c9b3082e1d4ce8b07734d22dd0b409 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409 2024-11-11T12:42:53,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409, entries=150, sequenceid=309, filesize=12.0 K 2024-11-11T12:42:53,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for 10680aa1d1802ca2e3b6db31ab7f417e in 997ms, sequenceid=309, compaction requested=false 2024-11-11T12:42:53,397 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:53,513 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,516 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=86 2024-11-11T12:42:53,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:53,518 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:53,519 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:53,542 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5d3818bd39d74173a6459aa14baf83d9 is 50, key is test_row_0/A:col10/1731328972425/Put/seqid=0 2024-11-11T12:42:53,569 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:53,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:53,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742147_1323 (size=12301) 2024-11-11T12:42:53,624 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329033620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,625 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329033624, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,727 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329033727, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,731 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329033729, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,934 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329033933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329033933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:53,996 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:53,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329033996, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,000 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5d3818bd39d74173a6459aa14baf83d9 2024-11-11T12:42:54,011 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f691eeb65a334f808ab4dadbd9d1ab52 is 50, key is test_row_0/B:col10/1731328972425/Put/seqid=0 2024-11-11T12:42:54,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742148_1324 (size=12301) 2024-11-11T12:42:54,050 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f691eeb65a334f808ab4dadbd9d1ab52 2024-11-11T12:42:54,075 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/c8b519f84cd241caac20faa17b679a3c is 50, key is test_row_0/C:col10/1731328972425/Put/seqid=0 2024-11-11T12:42:54,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742149_1325 (size=12301) 2024-11-11T12:42:54,238 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329034237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,243 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329034242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
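
The repeated RegionTooBusyException entries in this stretch of the log come from HRegion.checkResources() rejecting writes while the region's memstore is over its blocking limit ("Over memstore limit=512.0 K"). As a rough illustration only, and not part of the captured log: that blocking limit is normally the memstore flush size multiplied by the block multiplier, so a figure of 512 KB suggests the test shrinks the usual defaults (for example a 128 KB flush size times a multiplier of 4). The exact values used by this run are not recorded here, so the sketch below is an assumption-labelled example of reading those two settings, not the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Illustrative sketch: shows where a blocking limit like "512.0 K" can come from.
// The fallback values below are the usual HBase defaults (128 MB flush size,
// multiplier 4); the values actually used by this test run are an assumption.
public class MemstoreLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
    // e.g. a hypothetical test-sized flush size of 128 KB * 4 = 512 KB, matching
    // the limit reported by the RegionTooBusyException messages in this log.
    System.out.println("memstore blocking limit = " + (flushSize * multiplier) + " bytes");
  }
}
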
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34886 deadline: 1731329034305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,308 DEBUG [Thread-1185 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4192 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:54,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:54,514 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/c8b519f84cd241caac20faa17b679a3c 2024-11-11T12:42:54,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/5d3818bd39d74173a6459aa14baf83d9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9 2024-11-11T12:42:54,525 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9, entries=150, sequenceid=326, filesize=12.0 K 2024-11-11T12:42:54,526 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f691eeb65a334f808ab4dadbd9d1ab52 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52 2024-11-11T12:42:54,536 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52, entries=150, sequenceid=326, filesize=12.0 K 2024-11-11T12:42:54,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/c8b519f84cd241caac20faa17b679a3c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c 2024-11-11T12:42:54,547 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c, entries=150, 
sequenceid=326, filesize=12.0 K 2024-11-11T12:42:54,548 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=134.18 KB/137400 for 10680aa1d1802ca2e3b6db31ab7f417e in 1030ms, sequenceid=326, compaction requested=true 2024-11-11T12:42:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:54,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=86}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=86 2024-11-11T12:42:54,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=86 2024-11-11T12:42:54,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=86, resume processing ppid=85 2024-11-11T12:42:54,554 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, ppid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.2950 sec 2024-11-11T12:42:54,556 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=85, table=TestAcidGuarantees in 2.3270 sec 2024-11-11T12:42:54,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:54,742 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:42:54,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:54,742 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:54,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:54,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:54,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:54,743 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:54,750 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/10a12584f4684e129e07a027c5da301c is 50, key is test_row_0/A:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:54,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
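
The Thread-1185 entry above shows the client side of the same memstore pressure: AcidGuaranteesTestTool's writer calls HTable.put(), and RpcRetryingCallerImpl keeps retrying (tries=6, retries=16 in this log) until the region accepts the write or the retry budget runs out. The sketch below is a minimal, assumption-labelled client example of that path; the table name, row, and column family/qualifier are the ones visible in this log, while the retry settings shown are illustrative rather than the values this test actually used.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutRetrySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Client-side retry knobs consulted by RpcRetryingCallerImpl; illustrative values,
    // not necessarily what this test run configured.
    conf.setInt("hbase.client.retries.number", 16);
    conf.setLong("hbase.client.pause", 100);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // If the region stays over its memstore blocking limit for the whole retry
      // budget, this call ultimately surfaces the RegionTooBusyException seen above.
      table.put(put);
    }
  }
}
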
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329034759, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329034760, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742150_1326 (size=12301) 2024-11-11T12:42:54,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329034863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:54,865 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:54,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329034863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,068 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 204 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329035067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,072 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329035072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,172 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/10a12584f4684e129e07a027c5da301c 2024-11-11T12:42:55,180 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/68138ce0583a4dcf93ee8f82f84a5410 is 50, key is test_row_0/B:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:55,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742151_1327 (size=12301) 2024-11-11T12:42:55,371 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 206 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329035370, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329035375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,605 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=350 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/68138ce0583a4dcf93ee8f82f84a5410 2024-11-11T12:42:55,619 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/2c07b35b8ea84a0287fc40e2e258e438 is 50, key is test_row_0/C:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:55,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742152_1328 (size=12301) 2024-11-11T12:42:55,877 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 208 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329035876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:55,894 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:55,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329035894, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:56,007 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:56,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34840 deadline: 1731329036006, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:56,008 DEBUG [Thread-1187 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4154 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:42:56,032 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=350 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/2c07b35b8ea84a0287fc40e2e258e438 2024-11-11T12:42:56,045 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/10a12584f4684e129e07a027c5da301c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c 2024-11-11T12:42:56,056 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c, entries=150, sequenceid=350, filesize=12.0 K 2024-11-11T12:42:56,057 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/68138ce0583a4dcf93ee8f82f84a5410 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410 2024-11-11T12:42:56,061 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410, entries=150, sequenceid=350, filesize=12.0 K 2024-11-11T12:42:56,062 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/2c07b35b8ea84a0287fc40e2e258e438 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438 2024-11-11T12:42:56,068 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438, entries=150, sequenceid=350, filesize=12.0 K 2024-11-11T12:42:56,069 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 10680aa1d1802ca2e3b6db31ab7f417e in 1327ms, sequenceid=350, compaction requested=true 2024-11-11T12:42:56,069 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:56,070 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:56,070 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:42:56,071 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:56,071 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:56,071 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:56,071 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/a42d77ae6c0c4698b1f4fae00674b391, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=48.7 K 2024-11-11T12:42:56,071 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a42d77ae6c0c4698b1f4fae00674b391, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:56,072 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:56,072 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f621ae6b0a85419ab24feb57a7c61585, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1731328971828 2024-11-11T12:42:56,072 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
5d3818bd39d74173a6459aa14baf83d9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731328972416 2024-11-11T12:42:56,073 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 10a12584f4684e129e07a027c5da301c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:56,077 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:56,077 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:56,077 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:56,077 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/28db06d00c2b4545b14622eed42ae298, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=48.7 K 2024-11-11T12:42:56,077 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 28db06d00c2b4545b14622eed42ae298, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:56,078 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 665b68d3ad764299841b9f56004d2563, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1731328971828 2024-11-11T12:42:56,080 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#281 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:56,081 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4b7215e8e5914b98979ce850cbdba4dd is 50, key is test_row_0/A:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:56,082 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f691eeb65a334f808ab4dadbd9d1ab52, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731328972416 2024-11-11T12:42:56,082 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 68138ce0583a4dcf93ee8f82f84a5410, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:56,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742153_1329 (size=13119) 2024-11-11T12:42:56,096 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#282 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:56,097 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/24404422ab2d4ea4b9ed4c093a4dace6 is 50, key is test_row_0/B:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:56,100 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/4b7215e8e5914b98979ce850cbdba4dd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4b7215e8e5914b98979ce850cbdba4dd 2024-11-11T12:42:56,109 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into 4b7215e8e5914b98979ce850cbdba4dd(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:56,109 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:56,109 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=12, startTime=1731328976069; duration=0sec 2024-11-11T12:42:56,109 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:56,109 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:56,109 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:42:56,110 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49852 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:42:56,110 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:56,110 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:56,110 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d9567cd402d149f78f035dc9bf3c0657, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=48.7 K 2024-11-11T12:42:56,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d9567cd402d149f78f035dc9bf3c0657, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=287, earliestPutTs=1731328971242 2024-11-11T12:42:56,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d3c9b3082e1d4ce8b07734d22dd0b409, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=309, earliestPutTs=1731328971828 2024-11-11T12:42:56,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c8b519f84cd241caac20faa17b679a3c, keycount=150, bloomtype=ROW, size=12.0 K, 
encoding=NONE, compression=NONE, seqNum=326, earliestPutTs=1731328972416 2024-11-11T12:42:56,112 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2c07b35b8ea84a0287fc40e2e258e438, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:56,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742154_1330 (size=13119) 2024-11-11T12:42:56,123 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#283 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:56,124 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/eb660226bd8a43c4a1ce8987575b4f72 is 50, key is test_row_0/C:col10/1731328973603/Put/seqid=0 2024-11-11T12:42:56,132 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/24404422ab2d4ea4b9ed4c093a4dace6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/24404422ab2d4ea4b9ed4c093a4dace6 2024-11-11T12:42:56,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742155_1331 (size=13085) 2024-11-11T12:42:56,139 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 24404422ab2d4ea4b9ed4c093a4dace6(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:56,139 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:56,139 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=12, startTime=1731328976070; duration=0sec 2024-11-11T12:42:56,139 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:56,140 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:56,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=85 2024-11-11T12:42:56,360 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 85 completed 2024-11-11T12:42:56,361 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:42:56,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees 2024-11-11T12:42:56,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-11T12:42:56,363 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:42:56,364 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=87, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:42:56,364 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=88, ppid=87, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:42:56,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-11T12:42:56,516 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:42:56,517 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=88 2024-11-11T12:42:56,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:56,517 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:42:56,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:56,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:56,517 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:56,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:56,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:56,518 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:56,537 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2893e85bfc7c4bfcaac58e47080754f7 is 50, key is test_row_0/A:col10/1731328974759/Put/seqid=0 2024-11-11T12:42:56,568 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/eb660226bd8a43c4a1ce8987575b4f72 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eb660226bd8a43c4a1ce8987575b4f72 2024-11-11T12:42:56,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742156_1332 (size=12301) 2024-11-11T12:42:56,577 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2893e85bfc7c4bfcaac58e47080754f7 2024-11-11T12:42:56,602 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into eb660226bd8a43c4a1ce8987575b4f72(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:42:56,602 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:56,602 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=12, startTime=1731328976070; duration=0sec 2024-11-11T12:42:56,602 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:56,602 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C 2024-11-11T12:42:56,610 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 is 50, key is test_row_0/B:col10/1731328974759/Put/seqid=0 2024-11-11T12:42:56,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742157_1333 (size=12301) 2024-11-11T12:42:56,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-11T12:42:56,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:56,890 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. as already flushing 2024-11-11T12:42:56,945 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 225 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329036943, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:56,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:56,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329036944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:56,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-11T12:42:57,049 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 2024-11-11T12:42:57,051 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:57,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 227 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329037050, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:57,063 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:57,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329037061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:57,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1b6e1e218a9142c7b907f6b47be71543 is 50, key is test_row_0/C:col10/1731328974759/Put/seqid=0 2024-11-11T12:42:57,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742158_1334 (size=12301) 2024-11-11T12:42:57,194 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1b6e1e218a9142c7b907f6b47be71543 2024-11-11T12:42:57,217 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/2893e85bfc7c4bfcaac58e47080754f7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7 2024-11-11T12:42:57,254 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7, entries=150, sequenceid=364, filesize=12.0 K 2024-11-11T12:42:57,254 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:57,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34860 deadline: 1731329037252, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:57,255 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 2024-11-11T12:42:57,265 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8, entries=150, sequenceid=364, filesize=12.0 K 2024-11-11T12:42:57,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/1b6e1e218a9142c7b907f6b47be71543 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543 2024-11-11T12:42:57,274 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:42:57,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:34896 deadline: 1731329037273, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:42:57,275 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543, entries=150, sequenceid=364, filesize=12.0 K 2024-11-11T12:42:57,277 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 10680aa1d1802ca2e3b6db31ab7f417e in 760ms, sequenceid=364, compaction requested=false 2024-11-11T12:42:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:57,277 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-11-11T12:42:57,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-11-11T12:42:57,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=88, resume processing ppid=87 2024-11-11T12:42:57,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 914 msec 2024-11-11T12:42:57,281 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=87, table=TestAcidGuarantees in 919 msec 2024-11-11T12:42:57,340 DEBUG [Thread-1194 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5e998dd3 to 127.0.0.1:54294 2024-11-11T12:42:57,340 DEBUG [Thread-1194 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,340 DEBUG [Thread-1192 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x088aa519 to 127.0.0.1:54294 2024-11-11T12:42:57,340 DEBUG [Thread-1192 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,340 DEBUG [Thread-1196 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e4c79b8 to 127.0.0.1:54294 2024-11-11T12:42:57,341 DEBUG [Thread-1196 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,342 DEBUG [Thread-1198 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d1403c3 to 127.0.0.1:54294 2024-11-11T12:42:57,342 DEBUG [Thread-1190 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78b04266 to 127.0.0.1:54294 2024-11-11T12:42:57,342 DEBUG [Thread-1198 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,342 DEBUG [Thread-1190 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=87 2024-11-11T12:42:57,467 INFO [Thread-1189 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 87 completed 2024-11-11T12:42:57,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:42:57,558 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:42:57,558 DEBUG [Thread-1181 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x64ee0130 to 127.0.0.1:54294 2024-11-11T12:42:57,558 DEBUG [Thread-1181 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, 
new segment=null 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:42:57,558 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:42:57,562 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c2ca1b5dce914e568b66edaf19235391 is 50, key is test_row_0/A:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:57,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742159_1335 (size=12301) 2024-11-11T12:42:57,576 DEBUG [Thread-1179 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x44645c55 to 127.0.0.1:54294 2024-11-11T12:42:57,576 DEBUG [Thread-1179 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,800 DEBUG [Thread-1183 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x683b64c3 to 127.0.0.1:54294 2024-11-11T12:42:57,801 DEBUG [Thread-1183 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:57,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c2ca1b5dce914e568b66edaf19235391 2024-11-11T12:42:57,975 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/3c1b58663a4a4ee5a05687d33d3cf721 is 50, key is test_row_0/B:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:57,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742160_1336 (size=12301) 2024-11-11T12:42:58,311 DEBUG [Thread-1185 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x07e55eb7 to 127.0.0.1:54294 2024-11-11T12:42:58,311 DEBUG [Thread-1185 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:42:58,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/3c1b58663a4a4ee5a05687d33d3cf721 2024-11-11T12:42:58,390 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/244abf617709441a8064849fe41121fa is 50, key is test_row_0/C:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:58,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742161_1337 (size=12301) 2024-11-11T12:42:58,819 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=390 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/244abf617709441a8064849fe41121fa 2024-11-11T12:42:58,823 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c2ca1b5dce914e568b66edaf19235391 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391 2024-11-11T12:42:58,827 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:42:58,827 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/3c1b58663a4a4ee5a05687d33d3cf721 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721 2024-11-11T12:42:58,831 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:42:58,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/244abf617709441a8064849fe41121fa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa 2024-11-11T12:42:58,836 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:42:58,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=20.13 KB/20610 for 10680aa1d1802ca2e3b6db31ab7f417e in 1279ms, sequenceid=390, compaction requested=true 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:58,837 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:58,837 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 10680aa1d1802ca2e3b6db31ab7f417e:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:42:58,837 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:58,838 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:58,838 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/A is initiating minor compaction (all files) 2024-11-11T12:42:58,838 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:58,838 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/A in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:42:58,838 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/B is initiating minor compaction (all files) 2024-11-11T12:42:58,838 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4b7215e8e5914b98979ce850cbdba4dd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.8 K 2024-11-11T12:42:58,838 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/B in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
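
The two ExploringCompactionPolicy entries above ("Exploring compaction algorithm has selected 3 files of size 37721 ... with 1 in ratio") reflect a size-ratio test over candidate store files. The following is a simplified, standalone Java sketch of that ratio idea only, not the HBase class itself; the 1.2 ratio and the file sizes in main() are assumptions for illustration.

import java.util.List;

// Simplified sketch of the size-ratio check behind "files in ratio" compaction
// selection. The real ExploringCompactionPolicy also enforces min/max file
// counts and size limits and compares all qualifying windows to pick the best.
final class RatioSelectionSketch {

    // A window of store files qualifies when no single file is larger than
    // `ratio` times the combined size of the other files in the window.
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = 0;
        for (long size : fileSizes) {
            total += size;
        }
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Illustrative sizes on the order of the ~12 K HFiles in the log.
        List<Long> window = List.of(13_100L, 12_300L, 12_300L);
        // 1.2 is an assumed default ratio, not read from any config here.
        System.out.println("qualifies = " + filesInRatio(window, 1.2));
    }
}
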
2024-11-11T12:42:58,838 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/24404422ab2d4ea4b9ed4c093a4dace6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.8 K 2024-11-11T12:42:58,839 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4b7215e8e5914b98979ce850cbdba4dd, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:58,839 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 24404422ab2d4ea4b9ed4c093a4dace6, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:58,839 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f4cb8ea9ee97483c97c8bd6e9b9a6ec8, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731328974751 2024-11-11T12:42:58,839 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2893e85bfc7c4bfcaac58e47080754f7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731328974751 2024-11-11T12:42:58,840 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3c1b58663a4a4ee5a05687d33d3cf721, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731328976922 2024-11-11T12:42:58,840 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c2ca1b5dce914e568b66edaf19235391, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731328976922 2024-11-11T12:42:58,855 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#B#compaction#290 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:58,855 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#A#compaction#291 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:58,856 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/3b168d4c9bda41e08819ad433ab8987d is 50, key is test_row_0/B:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:58,856 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/72da51cc7e7e471b91c809f95c0858a2 is 50, key is test_row_0/A:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:58,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742163_1339 (size=13221) 2024-11-11T12:42:58,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742162_1338 (size=13221) 2024-11-11T12:42:59,276 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/3b168d4c9bda41e08819ad433ab8987d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3b168d4c9bda41e08819ad433ab8987d 2024-11-11T12:42:59,276 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/72da51cc7e7e471b91c809f95c0858a2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/72da51cc7e7e471b91c809f95c0858a2 2024-11-11T12:42:59,281 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/B of 10680aa1d1802ca2e3b6db31ab7f417e into 3b168d4c9bda41e08819ad433ab8987d(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:42:59,281 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/A of 10680aa1d1802ca2e3b6db31ab7f417e into 72da51cc7e7e471b91c809f95c0858a2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
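
The repeated "Committing .../.tmp/... as .../<family>/..." lines from HRegionFileSystem show flush and compaction output being written into the region's .tmp directory and then renamed into the column-family directory, so readers only ever see complete files. Below is a minimal sketch of that commit step with the Hadoop FileSystem API; the class, method, and paths are hypothetical stand-ins, not HBase's HRegionFileSystem.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "write into .tmp, then rename into the store directory" commit
// step suggested by the HRegionFileSystem "Committing ... as ..." log lines.
// Directory layout and helper names are illustrative, not the HBase code.
public final class TmpCommitSketch {

    // Moves a finished file from the region's .tmp area into the family dir.
    // On HDFS the rename is atomic, so readers never see a half-written file.
    static Path commitStoreFile(FileSystem fs, Path tmpFile, Path familyDir)
            throws IOException {
        if (!fs.exists(familyDir)) {
            fs.mkdirs(familyDir);
        }
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {
            throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);
        // Hypothetical paths shaped like the ones in the log.
        Path tmp = new Path("/data/default/TestAcidGuarantees/region/.tmp/A/file1");
        Path familyDir = new Path("/data/default/TestAcidGuarantees/region/A");
        System.out.println("committed to " + commitStoreFile(fs, tmp, familyDir));
    }
}
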
2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:42:59,281 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/B, priority=13, startTime=1731328978837; duration=0sec 2024-11-11T12:42:59,281 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/A, priority=13, startTime=1731328978837; duration=0sec 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:B 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:42:59,281 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:A 2024-11-11T12:42:59,282 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:42:59,282 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 10680aa1d1802ca2e3b6db31ab7f417e/C is initiating minor compaction (all files) 2024-11-11T12:42:59,282 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 10680aa1d1802ca2e3b6db31ab7f417e/C in TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
2024-11-11T12:42:59,282 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eb660226bd8a43c4a1ce8987575b4f72, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp, totalSize=36.8 K 2024-11-11T12:42:59,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting eb660226bd8a43c4a1ce8987575b4f72, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=350, earliestPutTs=1731328973603 2024-11-11T12:42:59,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1b6e1e218a9142c7b907f6b47be71543, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731328974751 2024-11-11T12:42:59,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 244abf617709441a8064849fe41121fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731328976922 2024-11-11T12:42:59,289 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 10680aa1d1802ca2e3b6db31ab7f417e#C#compaction#292 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:42:59,290 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/21548eed47e84fa08d37b0b65eee3740 is 50, key is test_row_0/C:col10/1731328977556/Put/seqid=0 2024-11-11T12:42:59,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742164_1340 (size=13187) 2024-11-11T12:42:59,703 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/21548eed47e84fa08d37b0b65eee3740 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/21548eed47e84fa08d37b0b65eee3740 2024-11-11T12:42:59,707 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 10680aa1d1802ca2e3b6db31ab7f417e/C of 10680aa1d1802ca2e3b6db31ab7f417e into 21548eed47e84fa08d37b0b65eee3740(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
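
The PressureAwareThroughputController entries report compaction write throughput (about 6.55 MB/second here) against a 50.00 MB/second limit, sleeping when writers get ahead of it. The following is a deliberately simplified byte-rate throttle in the same spirit; the class name and control() method are assumptions for illustration, not the HBase controller's API.

import java.util.concurrent.TimeUnit;

// Minimal byte-rate throttle in the spirit of the throughput-controller log
// lines: callers report how many bytes they just wrote, and the throttle
// sleeps whenever the running rate would exceed the configured limit.
final class SimpleThroughputThrottle {

    private final double maxBytesPerSecond;
    private final long windowStartNanos = System.nanoTime();
    private long bytesInWindow = 0;

    SimpleThroughputThrottle(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    // Report `bytes` written; sleep just long enough to stay under the limit.
    synchronized void control(long bytes) throws InterruptedException {
        bytesInWindow += bytes;
        double elapsedSec = (System.nanoTime() - windowStartNanos) / 1e9;
        double minSecondsNeeded = bytesInWindow / maxBytesPerSecond;
        if (minSecondsNeeded > elapsedSec) {
            long sleepMs = (long) ((minSecondsNeeded - elapsedSec) * 1000);
            TimeUnit.MILLISECONDS.sleep(sleepMs);
        }
    }

    public static void main(String[] args) throws InterruptedException {
        // 50 MB/second, matching the "total limit" reported in the log.
        SimpleThroughputThrottle throttle =
            new SimpleThroughputThrottle(50.0 * 1024 * 1024);
        for (int i = 0; i < 10; i++) {
            // Pretend each compaction chunk is 8 MB of output.
            throttle.control(8L * 1024 * 1024);
        }
        System.out.println("done");
    }
}
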
2024-11-11T12:42:59,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 10680aa1d1802ca2e3b6db31ab7f417e:
2024-11-11T12:42:59,707 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e., storeName=10680aa1d1802ca2e3b6db31ab7f417e/C, priority=13, startTime=1731328978837; duration=0sec
2024-11-11T12:42:59,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:42:59,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 10680aa1d1802ca2e3b6db31ab7f417e:C
2024-11-11T12:43:00,050 DEBUG [Thread-1187 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x03a703d2 to 127.0.0.1:54294
2024-11-11T12:43:00,050 DEBUG [Thread-1187 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 104
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 57
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 68
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5686
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5602
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5908
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5661
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5742
2024-11-11T12:43:00,050 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-11T12:43:00,050 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-11T12:43:00,050 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58341641 to 127.0.0.1:54294
2024-11-11T12:43:00,050 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:43:00,051 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-11T12:43:00,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-11T12:43:00,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-11T12:43:00,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89
2024-11-11T12:43:00,054 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put
{"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328980054"}]},"ts":"1731328980054"} 2024-11-11T12:43:00,055 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-11T12:43:00,057 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-11T12:43:00,058 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:43:00,059 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, UNASSIGN}] 2024-11-11T12:43:00,060 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, UNASSIGN 2024-11-11T12:43:00,061 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=10680aa1d1802ca2e3b6db31ab7f417e, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:00,062 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:43:00,062 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=92, ppid=91, state=RUNNABLE; CloseRegionProcedure 10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:00,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-11T12:43:00,213 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:00,214 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(124): Close 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:43:00,214 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:43:00,214 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1681): Closing 10680aa1d1802ca2e3b6db31ab7f417e, disabling compactions & flushes 2024-11-11T12:43:00,214 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:43:00,214 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 
after waiting 0 ms 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:43:00,215 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(2837): Flushing 10680aa1d1802ca2e3b6db31ab7f417e 3/3 column families, dataSize=26.84 KB heapSize=71.06 KB 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=A 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=B 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 10680aa1d1802ca2e3b6db31ab7f417e, store=C 2024-11-11T12:43:00,215 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:00,220 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c69f1b53856049648d8ec76438197e5b is 50, key is test_row_0/A:col10/1731328980048/Put/seqid=0 2024-11-11T12:43:00,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742165_1341 (size=12301) 2024-11-11T12:43:00,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-11T12:43:00,626 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c69f1b53856049648d8ec76438197e5b 2024-11-11T12:43:00,633 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5b802ad3ff9d44d491cce39f4b1ad2a3 is 50, key is test_row_0/B:col10/1731328980048/Put/seqid=0 2024-11-11T12:43:00,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742166_1342 (size=12301) 
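
The disable sequence logged above (HBaseAdmin "Started disable of TestAcidGuarantees", the master storing pid=89 for DisableTableProcedure, and the client repeatedly checking whether the procedure is done) is what the HBase client Admin API drives. A minimal client-side sketch of the same flush-then-disable flow follows, assuming an hbase-site.xml with the cluster's ZooKeeper quorum is on the classpath; Admin.flush and Admin.disableTable block until the corresponding master procedures complete.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Client-side counterpart of the FLUSH (pid=87) and disable (pid=89) steps in
// the log. Assumes the cluster configuration is available on the classpath.
public final class FlushAndDisableExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            // Flush all memstores of the table to HFiles (FlushTableProcedure).
            admin.flush(table);
            // Disable the table; this blocks while the master runs the
            // DisableTableProcedure and unassigns the region(s).
            admin.disableTable(table);
            System.out.println("disabled = " + admin.isTableDisabled(table));
        }
    }
}
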
2024-11-11T12:43:00,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-11T12:43:01,038 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5b802ad3ff9d44d491cce39f4b1ad2a3 2024-11-11T12:43:01,044 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1135402dd3040eabd5f35c85b2e6583 is 50, key is test_row_0/C:col10/1731328980048/Put/seqid=0 2024-11-11T12:43:01,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742167_1343 (size=12301) 2024-11-11T12:43:01,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-11T12:43:01,459 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.95 KB at sequenceid=400 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1135402dd3040eabd5f35c85b2e6583 2024-11-11T12:43:01,463 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/A/c69f1b53856049648d8ec76438197e5b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c69f1b53856049648d8ec76438197e5b 2024-11-11T12:43:01,467 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c69f1b53856049648d8ec76438197e5b, entries=150, sequenceid=400, filesize=12.0 K 2024-11-11T12:43:01,468 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/B/5b802ad3ff9d44d491cce39f4b1ad2a3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5b802ad3ff9d44d491cce39f4b1ad2a3 2024-11-11T12:43:01,472 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5b802ad3ff9d44d491cce39f4b1ad2a3, entries=150, sequenceid=400, filesize=12.0 K 2024-11-11T12:43:01,473 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/.tmp/C/b1135402dd3040eabd5f35c85b2e6583 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1135402dd3040eabd5f35c85b2e6583 2024-11-11T12:43:01,476 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1135402dd3040eabd5f35c85b2e6583, entries=150, sequenceid=400, filesize=12.0 K 2024-11-11T12:43:01,477 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(3040): Finished flush of dataSize ~26.84 KB/27480, heapSize ~71.02 KB/72720, currentSize=0 B/0 for 10680aa1d1802ca2e3b6db31ab7f417e in 1262ms, sequenceid=400, compaction requested=false 2024-11-11T12:43:01,478 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/7791cfe3b4484fa9aa913b04a4310cac, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c3c42b6d12884e8f8f944516fa8e60fc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/50a8ac66b8414955b126b4f185069709, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4c4c930b255949d5a86f2961cd67b7d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/dadd7a6ae4e24a85aee9bc1f137f3bd5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/a42d77ae6c0c4698b1f4fae00674b391, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4b7215e8e5914b98979ce850cbdba4dd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391] to archive 2024-11-11T12:43:01,479 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
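
The StoreCloser/HFileArchiver entries show compacted-away store files being moved from the table's data/ tree into a parallel archive/ tree rather than deleted outright, preserving the namespace/table/region/family layout so cleaners and snapshots can decide their fate later. The sketch below illustrates just that path mapping and move with the Hadoop FileSystem API; the helper names and paths are hypothetical, not HBase's HFileArchiver.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch of the "archive instead of delete" move visible in the HFileArchiver
// log lines: .../data/<ns>/<table>/<region>/<family>/<file> is relocated to
// .../archive/data/<ns>/<table>/<region>/<family>/<file>.
public final class ArchiveStoreFileSketch {

    // Builds the archive-side path for a store file under `rootDir/data/...`,
    // mirroring the data/ vs. archive/data/ layout shown in the log.
    static Path toArchivePath(Path rootDir, Path storeFile) {
        String full = storeFile.toUri().getPath();
        String dataPrefix = new Path(rootDir, "data").toUri().getPath() + "/";
        if (!full.startsWith(dataPrefix)) {
            throw new IllegalArgumentException("Not under data/: " + storeFile);
        }
        String relative = full.substring(dataPrefix.length());
        return new Path(new Path(rootDir, "archive/data"), relative);
    }

    static void archive(FileSystem fs, Path rootDir, Path storeFile)
            throws IOException {
        Path target = toArchivePath(rootDir, storeFile);
        fs.mkdirs(target.getParent());
        if (!fs.rename(storeFile, target)) {
            throw new IOException("Could not archive " + storeFile);
        }
    }

    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical root and store-file paths shaped like those in the log.
        Path rootDir = new Path("/user/jenkins/test-data/cluster-root");
        Path storeFile = new Path(rootDir,
            "data/default/TestAcidGuarantees/region/A/storefile1");
        archive(rootDir.getFileSystem(conf), rootDir, storeFile);
    }
}
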
2024-11-11T12:43:01,480 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5071fbe10b9f4c47b82b8d62a7ccb71e 2024-11-11T12:43:01,481 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/8c401c439c7b4de1a54fb48a5885cb82 2024-11-11T12:43:01,482 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4d43863061d74c57a6721080675db5e6 2024-11-11T12:43:01,483 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/6f145d7bd20d430caab2e5c3d892cc87 2024-11-11T12:43:01,484 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/7791cfe3b4484fa9aa913b04a4310cac to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/7791cfe3b4484fa9aa913b04a4310cac 2024-11-11T12:43:01,485 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/56f4d6f73f7d4532aba557561fc218a7 2024-11-11T12:43:01,485 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4245c5822be948ee82a4d35519cd2e80 2024-11-11T12:43:01,486 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c3c42b6d12884e8f8f944516fa8e60fc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c3c42b6d12884e8f8f944516fa8e60fc 2024-11-11T12:43:01,487 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/493bf71e828b49568f45863daeb1ba9e 2024-11-11T12:43:01,488 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/792e1d393b634feca8dfbb5d88521a69 2024-11-11T12:43:01,489 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/50a8ac66b8414955b126b4f185069709 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/50a8ac66b8414955b126b4f185069709 2024-11-11T12:43:01,491 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/e27ac802f5f54604922e6441d022963c 2024-11-11T12:43:01,492 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/49e18ee0f9bd457bb14d988b77d43dfb 2024-11-11T12:43:01,493 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4c4c930b255949d5a86f2961cd67b7d0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4c4c930b255949d5a86f2961cd67b7d0 2024-11-11T12:43:01,494 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/d14e7d4a29514c1280af60965307b2ca 2024-11-11T12:43:01,495 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5dee5c4a608e4971bca4c2bc26062fc2 2024-11-11T12:43:01,496 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/3831d8e204904360adf57aaf7fea5798 2024-11-11T12:43:01,497 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/dadd7a6ae4e24a85aee9bc1f137f3bd5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/dadd7a6ae4e24a85aee9bc1f137f3bd5 2024-11-11T12:43:01,499 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2ca2e412c95041b49c753b235316136e 2024-11-11T12:43:01,501 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/a42d77ae6c0c4698b1f4fae00674b391 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/a42d77ae6c0c4698b1f4fae00674b391 2024-11-11T12:43:01,502 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/432b277ece114ff69fc3bf25acccd08c 2024-11-11T12:43:01,503 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/f621ae6b0a85419ab24feb57a7c61585 2024-11-11T12:43:01,505 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/5d3818bd39d74173a6459aa14baf83d9 2024-11-11T12:43:01,506 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4b7215e8e5914b98979ce850cbdba4dd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/4b7215e8e5914b98979ce850cbdba4dd 2024-11-11T12:43:01,507 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/10a12584f4684e129e07a027c5da301c 2024-11-11T12:43:01,507 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/2893e85bfc7c4bfcaac58e47080754f7 2024-11-11T12:43:01,509 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c2ca1b5dce914e568b66edaf19235391 2024-11-11T12:43:01,510 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/87791b0c8e894d51b997cb9e5138a7cf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/895d3ebeeaf4417f90c391195a0c697a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f656baddf5274c6292e6c745128bf6ef, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7bb6520220a64255ad6935f7936c764c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e838ce0cdec14d38b73d57cdfebf47c6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/28db06d00c2b4545b14622eed42ae298, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/24404422ab2d4ea4b9ed4c093a4dace6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721] to archive 2024-11-11T12:43:01,511 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
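Every move above follows the same layout-preserving pattern: a store file at <root>/data/default/<table>/<region>/<family>/<file> is relocated to the identical relative path under <root>/archive/. A minimal illustrative sketch of that mapping in Java (the class and method names are hypothetical; this is not HBase's HFileArchiver code, and the root and file paths are simply the ones appearing in these entries):

// Illustrative only: reproduces the data/ -> archive/ path mapping visible in the log.
public class ArchivePathSketch {
    static String toArchivePath(String rootDir, String storeFilePath) {
        // Keep the table/region/family/file layout, relocated under <rootDir>/archive/.
        String relative = storeFilePath.substring(rootDir.length() + 1); // "data/default/..."
        return rootDir + "/archive/" + relative;
    }

    public static void main(String[] args) {
        String root = "hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18";
        String src = root + "/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58";
        System.out.println(toArchivePath(root, src));
        // Prints the corresponding .../archive/data/default/TestAcidGuarantees/.../B/... path.
    }
}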
2024-11-11T12:43:01,513 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a41c0bc9060a4226a20d1aeb51e58d58 2024-11-11T12:43:01,514 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/a4f8803627a6452bb82abb338b4c579a 2024-11-11T12:43:01,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7caf14b30def4c4fb111331c4ca89e3d 2024-11-11T12:43:01,516 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/6f4ae46e7d694160ac91a0e1e11d9f71 2024-11-11T12:43:01,517 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/87791b0c8e894d51b997cb9e5138a7cf to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/87791b0c8e894d51b997cb9e5138a7cf 2024-11-11T12:43:01,518 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e15950b00bfc4254821faa5c162e709a 2024-11-11T12:43:01,519 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/bb3a19b080f040aaa2ecc949c10db180 2024-11-11T12:43:01,520 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/895d3ebeeaf4417f90c391195a0c697a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/895d3ebeeaf4417f90c391195a0c697a 2024-11-11T12:43:01,521 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/72a52418574548efa9be0805e551761c 2024-11-11T12:43:01,522 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/2400f65b7b5c4fbab23fdf96010a6331 2024-11-11T12:43:01,523 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f656baddf5274c6292e6c745128bf6ef to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f656baddf5274c6292e6c745128bf6ef 2024-11-11T12:43:01,524 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5046dfad2c514a2ead5febc73e7c9fc7 2024-11-11T12:43:01,525 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e033fafff1dd4c7daa94a1cd42f3044f 2024-11-11T12:43:01,526 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7bb6520220a64255ad6935f7936c764c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/7bb6520220a64255ad6935f7936c764c 2024-11-11T12:43:01,527 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/b08c3493bd0248e5b2db3ce64081d3a3 2024-11-11T12:43:01,528 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e4a0fb5ab4064589bd3d357a585ff8e6 2024-11-11T12:43:01,529 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e838ce0cdec14d38b73d57cdfebf47c6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/e838ce0cdec14d38b73d57cdfebf47c6 2024-11-11T12:43:01,530 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/332e9258b0144dc7871084d9217111d3 2024-11-11T12:43:01,531 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/8929b6d000dc4e9296d657ce1e1103e1 2024-11-11T12:43:01,531 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/28db06d00c2b4545b14622eed42ae298 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/28db06d00c2b4545b14622eed42ae298 2024-11-11T12:43:01,532 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/c412152e5d9d4b3dbb061411dc50e37f 2024-11-11T12:43:01,533 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/665b68d3ad764299841b9f56004d2563 2024-11-11T12:43:01,534 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f691eeb65a334f808ab4dadbd9d1ab52 2024-11-11T12:43:01,535 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/24404422ab2d4ea4b9ed4c093a4dace6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/24404422ab2d4ea4b9ed4c093a4dace6 2024-11-11T12:43:01,536 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/68138ce0583a4dcf93ee8f82f84a5410 2024-11-11T12:43:01,537 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/f4cb8ea9ee97483c97c8bd6e9b9a6ec8 2024-11-11T12:43:01,538 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3c1b58663a4a4ee5a05687d33d3cf721 2024-11-11T12:43:01,539 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/3e3d1b89abbd4a27af0049715f69bd82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e3d28808f5e4864b2d99ac278abcd01, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eddb45bd6da447068e33aeb44e48a1a6, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e0149f21cf32448391208d6c37046aeb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d9567cd402d149f78f035dc9bf3c0657, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eb660226bd8a43c4a1ce8987575b4f72, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa] to archive 2024-11-11T12:43:01,540 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
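The family C store files follow the same route into the archive. To verify the moves out of band, the archive directory can be listed with the standard Hadoop FileSystem API; a hedged sketch under that assumption (the class name is hypothetical, the NameNode address and path are copied from the entries above, and this code is not part of the test itself):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListArchiveSketch {
    public static void main(String[] args) throws Exception {
        // NameNode address and archive path taken from the log above.
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42421"), new Configuration());
        Path archivedFamily = new Path("/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18"
            + "/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C");
        for (FileStatus status : fs.listStatus(archivedFamily)) {
            System.out.println(status.getPath() + " (" + status.getLen() + " bytes)");
        }
        fs.close();
    }
}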
2024-11-11T12:43:01,541 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/29115167d1244af5898d5ef35aaf7016 2024-11-11T12:43:01,542 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e04b9b1c16448b9958cfa1011c7fc52 2024-11-11T12:43:01,543 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1aecc086061746df95aa7bccc31da5f7 2024-11-11T12:43:01,544 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/360c5c2b917446b0b1b7f4ca19762e84 2024-11-11T12:43:01,545 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/cab46268443f4a25843d8e9bf84cb241 2024-11-11T12:43:01,546 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/3e3d1b89abbd4a27af0049715f69bd82 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/3e3d1b89abbd4a27af0049715f69bd82 2024-11-11T12:43:01,547 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/5728761b36b74851a06dc6bdcf3bb579 2024-11-11T12:43:01,548 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/6e9b354e0d6c4d1ca8d7a9b47d4c451c 2024-11-11T12:43:01,549 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/39dcdf8b31474ffab0bcbea87fe9b339 2024-11-11T12:43:01,550 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e3d28808f5e4864b2d99ac278abcd01 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4e3d28808f5e4864b2d99ac278abcd01 2024-11-11T12:43:01,552 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/abef0f88bcc44447832300a29b52fafc 2024-11-11T12:43:01,553 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/0f7495b21ee04640a2334d12eb622c82 2024-11-11T12:43:01,554 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eddb45bd6da447068e33aeb44e48a1a6 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eddb45bd6da447068e33aeb44e48a1a6 2024-11-11T12:43:01,555 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e752aa6e2bf7417fbd6edd790115fd05 2024-11-11T12:43:01,556 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/4c32921daee942e490814a5174a41d6b 2024-11-11T12:43:01,557 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e0149f21cf32448391208d6c37046aeb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/e0149f21cf32448391208d6c37046aeb 2024-11-11T12:43:01,558 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1135219c5d3544d5a4b88ac82a4006f3 2024-11-11T12:43:01,559 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/f8376c2e142c41508a334f990a539203 2024-11-11T12:43:01,560 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d9567cd402d149f78f035dc9bf3c0657 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d9567cd402d149f78f035dc9bf3c0657 2024-11-11T12:43:01,561 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1d96e991ffa41d2a649d6839818b820 2024-11-11T12:43:01,562 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/d3c9b3082e1d4ce8b07734d22dd0b409 2024-11-11T12:43:01,563 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/c8b519f84cd241caac20faa17b679a3c 2024-11-11T12:43:01,565 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eb660226bd8a43c4a1ce8987575b4f72 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/eb660226bd8a43c4a1ce8987575b4f72 2024-11-11T12:43:01,566 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/2c07b35b8ea84a0287fc40e2e258e438 2024-11-11T12:43:01,566 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/1b6e1e218a9142c7b907f6b47be71543 2024-11-11T12:43:01,567 DEBUG [StoreCloser-TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/244abf617709441a8064849fe41121fa 2024-11-11T12:43:01,571 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/recovered.edits/403.seqid, newMaxSeqId=403, maxSeqId=1 2024-11-11T12:43:01,572 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e. 2024-11-11T12:43:01,572 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] regionserver.HRegion(1635): Region close journal for 10680aa1d1802ca2e3b6db31ab7f417e: 2024-11-11T12:43:01,573 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=92}] handler.UnassignRegionHandler(170): Closed 10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:43:01,574 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=91 updating hbase:meta row=10680aa1d1802ca2e3b6db31ab7f417e, regionState=CLOSED 2024-11-11T12:43:01,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=92, resume processing ppid=91 2024-11-11T12:43:01,576 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, ppid=91, state=SUCCESS; CloseRegionProcedure 10680aa1d1802ca2e3b6db31ab7f417e, server=32e78532c8b1,44673,1731328897232 in 1.5130 sec 2024-11-11T12:43:01,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=91, resume processing ppid=90 2024-11-11T12:43:01,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=90, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=10680aa1d1802ca2e3b6db31ab7f417e, UNASSIGN in 1.5170 sec 2024-11-11T12:43:01,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-11-11T12:43:01,578 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.5190 sec 2024-11-11T12:43:01,579 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328981579"}]},"ts":"1731328981579"} 2024-11-11T12:43:01,580 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:43:01,582 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:43:01,583 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.5310 sec 2024-11-11T12:43:02,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-11-11T12:43:02,158 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: 
default:TestAcidGuarantees, procId: 89 completed 2024-11-11T12:43:02,159 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:43:02,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,160 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=93, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,161 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=93, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-11T12:43:02,168 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:43:02,173 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/recovered.edits] 2024-11-11T12:43:02,177 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/72da51cc7e7e471b91c809f95c0858a2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/72da51cc7e7e471b91c809f95c0858a2 2024-11-11T12:43:02,178 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c69f1b53856049648d8ec76438197e5b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/A/c69f1b53856049648d8ec76438197e5b 2024-11-11T12:43:02,181 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3b168d4c9bda41e08819ad433ab8987d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/3b168d4c9bda41e08819ad433ab8987d 2024-11-11T12:43:02,182 DEBUG 
[HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5b802ad3ff9d44d491cce39f4b1ad2a3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/B/5b802ad3ff9d44d491cce39f4b1ad2a3 2024-11-11T12:43:02,189 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/21548eed47e84fa08d37b0b65eee3740 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/21548eed47e84fa08d37b0b65eee3740 2024-11-11T12:43:02,190 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1135402dd3040eabd5f35c85b2e6583 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/C/b1135402dd3040eabd5f35c85b2e6583 2024-11-11T12:43:02,195 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/recovered.edits/403.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e/recovered.edits/403.seqid 2024-11-11T12:43:02,195 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/10680aa1d1802ca2e3b6db31ab7f417e 2024-11-11T12:43:02,196 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:43:02,201 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=93, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,211 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:43:02,219 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-11T12:43:02,225 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=93, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,225 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 
2024-11-11T12:43:02,225 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731328982225"}]},"ts":"9223372036854775807"} 2024-11-11T12:43:02,243 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:43:02,243 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 10680aa1d1802ca2e3b6db31ab7f417e, NAME => 'TestAcidGuarantees,,1731328955164.10680aa1d1802ca2e3b6db31ab7f417e.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:43:02,243 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-11T12:43:02,243 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731328982243"}]},"ts":"9223372036854775807"} 2024-11-11T12:43:02,260 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:43:02,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-11T12:43:02,293 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=93, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 134 msec 2024-11-11T12:43:02,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=93 2024-11-11T12:43:02,467 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 93 completed 2024-11-11T12:43:02,479 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testGetAtomicity Thread=238 (was 242), OpenFileDescriptor=456 (was 470), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=904 (was 851) - SystemLoadAverage LEAK? -, ProcessCount=9 (was 11), AvailableMemoryMB=3271 (was 1799) - AvailableMemoryMB LEAK? - 2024-11-11T12:43:02,489 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=238, OpenFileDescriptor=456, MaxFileDescriptor=1048576, SystemLoadAverage=904, ProcessCount=9, AvailableMemoryMB=3270 2024-11-11T12:43:02,490 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
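The DISABLE (procId 89) and DELETE (procId 93) operations recorded above are driven by ordinary HBase Admin calls from the client side. A minimal sketch, assuming the standard HBase 2.x client API rather than the test harness's actual code (the class name is hypothetical):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // A table must be disabled before it can be deleted; these calls trigger the
            // DisableTableProcedure and DeleteTableProcedure whose progress is logged above.
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);
            }
            admin.deleteTable(table);
        }
    }
}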
2024-11-11T12:43:02,491 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:43:02,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:02,493 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:43:02,493 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:02,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 94 2024-11-11T12:43:02,494 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:43:02,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-11T12:43:02,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742168_1344 (size=960) 2024-11-11T12:43:02,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-11T12:43:02,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-11T12:43:02,920 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:43:02,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742169_1345 (size=53) 2024-11-11T12:43:02,930 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:43:02,930 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing 99827bdf8e81fc8bb34d29fe73f0a358, disabling compactions & flushes 2024-11-11T12:43:02,930 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:02,930 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:02,930 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. after waiting 0 ms 2024-11-11T12:43:02,930 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:02,931 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:02,931 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:02,932 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:43:02,932 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731328982932"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731328982932"}]},"ts":"1731328982932"} 2024-11-11T12:43:02,933 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
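The descriptor logged for the new table (families A, B and C with VERSIONS => '1', plus the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC') can be expressed with the descriptor builders of the HBase 2.x client API. A sketch under that assumption, not the literal test code (the class name is hypothetical):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
    public static void main(String[] args) throws Exception {
        TableDescriptorBuilder builder = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestAcidGuarantees"))
            // Table attribute shown in the log; selects the BASIC in-memory compaction policy.
            .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
        for (String family : new String[] {"A", "B", "C"}) {
            builder.setColumnFamily(ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes(family))
                .setMaxVersions(1) // VERSIONS => '1' in the logged descriptor
                .build());
        }
        TableDescriptor descriptor = builder.build();
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            admin.createTable(descriptor);
        }
    }
}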
2024-11-11T12:43:02,934 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:43:02,934 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328982934"}]},"ts":"1731328982934"} 2024-11-11T12:43:02,935 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:43:02,939 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, ASSIGN}] 2024-11-11T12:43:02,940 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, ASSIGN 2024-11-11T12:43:02,941 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=95, ppid=94, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:43:03,091 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:03,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; OpenRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:03,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-11T12:43:03,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:03,247 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:03,247 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7285): Opening region: {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:43:03,248 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,248 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:43:03,248 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7327): checking encryption for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,248 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(7330): checking classloading for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,249 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,251 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:03,251 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName A 2024-11-11T12:43:03,251 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:03,252 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:03,252 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,253 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:03,253 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName B 2024-11-11T12:43:03,253 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:03,254 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:03,254 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,255 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:03,255 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName C 2024-11-11T12:43:03,255 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:03,255 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:03,256 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:03,256 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,257 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,258 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:43:03,259 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1085): writing seq id for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:03,260 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:43:03,261 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1102): Opened 99827bdf8e81fc8bb34d29fe73f0a358; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60477077, jitterRate=-0.09882132709026337}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:43:03,262 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegion(1001): Region open journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:03,262 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., pid=96, masterSystemTime=1731328983244 2024-11-11T12:43:03,265 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:03,265 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=96}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:03,265 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:03,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-11-11T12:43:03,267 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; OpenRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 in 174 msec 2024-11-11T12:43:03,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=94 2024-11-11T12:43:03,269 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=94, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, ASSIGN in 328 msec 2024-11-11T12:43:03,269 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:43:03,270 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731328983269"}]},"ts":"1731328983269"} 2024-11-11T12:43:03,271 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:43:03,279 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=94, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:43:03,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=94, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 788 msec 2024-11-11T12:43:03,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=94 2024-11-11T12:43:03,599 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 94 completed 2024-11-11T12:43:03,601 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1df61dc9 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5fe71801 2024-11-11T12:43:03,605 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf5e2f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:03,608 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:03,610 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60870, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:03,611 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:43:03,612 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:46370, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:43:03,614 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-11T12:43:03,614 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:43:03,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=97, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:03,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742170_1346 (size=996) 2024-11-11T12:43:04,034 DEBUG [PEWorker-1 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-11T12:43:04,034 INFO [PEWorker-1 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-11T12:43:04,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=98, ppid=97, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:43:04,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, REOPEN/MOVE}] 2024-11-11T12:43:04,039 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, REOPEN/MOVE 2024-11-11T12:43:04,040 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,041 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:43:04,041 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE; CloseRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:04,193 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,193 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(124): Close 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,193 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:43:04,193 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1681): Closing 99827bdf8e81fc8bb34d29fe73f0a358, disabling compactions & flushes 2024-11-11T12:43:04,193 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,193 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,193 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. after waiting 0 ms 2024-11-11T12:43:04,193 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
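The modify request recorded above (pid=97) changes family A into a MOB-enabled family with MOB_THRESHOLD => '4', after which the master reopens the table's region. A hedged sketch of the corresponding client call follows; the MOB settings mirror the descriptor in the log, while the class and method names are illustrative assumptions.

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class EnableMobOnFamilyA {                           // illustrative class name
      // Mirrors the ModifyTableProcedure request logged above: family 'A' becomes a MOB family.
      static void enableMob(Admin admin) throws IOException {
        TableName tn = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(tn);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(
                ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)                        // IS_MOB => 'true'
                    .setMobThreshold(4L)                        // MOB_THRESHOLD => '4'
                    .build())
            .build();
        // The master stores a ModifyTableProcedure and reopens the region via
        // ReopenTableRegionsProcedure, i.e. the pid=97/98/99 sequence in the log.
        admin.modifyTable(modified);
      }
    }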
2024-11-11T12:43:04,197 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-11T12:43:04,198 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,198 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegion(1635): Region close journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:04,198 WARN [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] regionserver.HRegionServer(3786): Not adding moved region record: 99827bdf8e81fc8bb34d29fe73f0a358 to self. 2024-11-11T12:43:04,199 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=100}] handler.UnassignRegionHandler(170): Closed 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,199 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=CLOSED 2024-11-11T12:43:04,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-11-11T12:43:04,201 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; CloseRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 in 159 msec 2024-11-11T12:43:04,202 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=99, ppid=98, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, REOPEN/MOVE; state=CLOSED, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=true 2024-11-11T12:43:04,352 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,353 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=101, ppid=99, state=RUNNABLE; OpenRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:04,504 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,506 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:04,507 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7285): Opening region: {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:43:04,507 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,507 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:43:04,507 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7327): checking encryption for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,507 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(7330): checking classloading for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,508 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,509 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:04,509 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName A 2024-11-11T12:43:04,510 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:04,511 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:04,511 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,512 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:04,512 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName B 2024-11-11T12:43:04,512 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:04,513 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:04,513 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,513 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:04,513 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 99827bdf8e81fc8bb34d29fe73f0a358 columnFamilyName C 2024-11-11T12:43:04,513 DEBUG [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:04,514 INFO [StoreOpener-99827bdf8e81fc8bb34d29fe73f0a358-1 {}] regionserver.HStore(327): Store=99827bdf8e81fc8bb34d29fe73f0a358/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:04,514 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,514 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,515 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,516 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:43:04,517 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1085): writing seq id for 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,518 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1102): Opened 99827bdf8e81fc8bb34d29fe73f0a358; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64060182, jitterRate=-0.04542890191078186}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:43:04,518 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegion(1001): Region open journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:04,519 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., pid=101, masterSystemTime=1731328984504 2024-11-11T12:43:04,520 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,520 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=101}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:04,520 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=99 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=OPEN, openSeqNum=5, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=101, resume processing ppid=99 2024-11-11T12:43:04,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; OpenRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 in 168 msec 2024-11-11T12:43:04,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=99, resume processing ppid=98 2024-11-11T12:43:04,523 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, ppid=98, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, REOPEN/MOVE in 483 msec 2024-11-11T12:43:04,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=98, resume processing ppid=97 2024-11-11T12:43:04,525 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, ppid=97, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 487 msec 2024-11-11T12:43:04,526 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 910 msec 2024-11-11T12:43:04,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=97 2024-11-11T12:43:04,528 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3637e4c6 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51f7d511 2024-11-11T12:43:04,533 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75b14fbd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,534 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x72f422b4 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1dc42ea6 2024-11-11T12:43:04,537 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@62f74604, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,537 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2df33cdf to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@117e86d9 2024-11-11T12:43:04,539 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@49e13594, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,540 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09f472e0 to 
127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6cd96549 2024-11-11T12:43:04,543 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c54a0d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,543 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x167a78b0 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@31aea41b 2024-11-11T12:43:04,549 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3875c8c5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,550 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e247aa1 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@801ba40 2024-11-11T12:43:04,555 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@319559be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,556 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2205f666 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@27539bdc 2024-11-11T12:43:04,560 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c907e21, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,561 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6584e9ce to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5e3203d9 2024-11-11T12:43:04,569 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61ec0f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,570 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x37ec8e3b to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@798e7fd4 2024-11-11T12:43:04,573 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7819b9e2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,574 DEBUG [Time-limited test 
{}] zookeeper.ReadOnlyZKClient(149): Connect 0x787e5169 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7284f16d 2024-11-11T12:43:04,577 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47679076, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:04,581 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:04,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees 2024-11-11T12:43:04,583 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:04,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:04,584 DEBUG [hconnection-0x404e4d5c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,584 DEBUG [hconnection-0x3be3382a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,584 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=102, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:04,584 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=102, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:04,584 DEBUG [hconnection-0x2850ff85-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,585 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60884, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,585 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60894, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,585 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60882, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,588 DEBUG [hconnection-0x512dbd84-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,588 DEBUG [hconnection-0x6117c1e8-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,589 DEBUG [hconnection-0x79f23f58-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-11-11T12:43:04,589 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60908, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,590 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60934, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,590 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60918, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,591 DEBUG [hconnection-0x6121c855-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,591 DEBUG [hconnection-0x169ace45-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,592 DEBUG [hconnection-0x15bd9c22-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,592 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60954, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,592 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60938, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,593 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60964, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,594 DEBUG [hconnection-0x74e1600c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:04,595 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:60980, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:04,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,606 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:04,606 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:04,641 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329044639, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329044641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,642 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329044641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,645 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329044644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329044644, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,663 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e8cf033c03154a059fdc68c643c541d2_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328984604/Put/seqid=0 2024-11-11T12:43:04,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742171_1347 (size=12154) 2024-11-11T12:43:04,669 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:04,674 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e8cf033c03154a059fdc68c643c541d2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e8cf033c03154a059fdc68c643c541d2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:04,676 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e11dde57a90a45c8810b69e350c75929, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:04,677 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e11dde57a90a45c8810b69e350c75929 is 175, key is test_row_0/A:col10/1731328984604/Put/seqid=0 2024-11-11T12:43:04,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:04,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742172_1348 (size=30955) 2024-11-11T12:43:04,736 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,736 
DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:04,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:04,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,737 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:04,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:04,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:04,743 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329044743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,744 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329044743, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,749 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,749 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329044746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329044746, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,751 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329044750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,889 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,890 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:04,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:04,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:04,891 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:04,891 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:04,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:04,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:04,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329044946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,947 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329044946, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329044950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,953 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329044951, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,954 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:04,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329044953, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:04,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-11T12:43:05,048 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,049 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:05,049 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:05,049 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,115 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e11dde57a90a45c8810b69e350c75929 2024-11-11T12:43:05,168 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/550498b38c064ae8b3361f4c237e5731 is 50, key is test_row_0/B:col10/1731328984604/Put/seqid=0 2024-11-11T12:43:05,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742173_1349 (size=12001) 2024-11-11T12:43:05,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:05,213 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,219 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
as already flushing 2024-11-11T12:43:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,219 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,219 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329045251, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 16 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329045254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,257 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329045254, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,258 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329045255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,258 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,258 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329045257, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,376 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:05,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,534 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,584 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/550498b38c064ae8b3361f4c237e5731 2024-11-11T12:43:05,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/948b88661a54467cabb435c9fd7ecde8 is 50, key is test_row_0/C:col10/1731328984604/Put/seqid=0 2024-11-11T12:43:05,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742174_1350 (size=12001) 2024-11-11T12:43:05,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/948b88661a54467cabb435c9fd7ecde8 2024-11-11T12:43:05,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e11dde57a90a45c8810b69e350c75929 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929 2024-11-11T12:43:05,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929, entries=150, sequenceid=15, filesize=30.2 K 2024-11-11T12:43:05,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/550498b38c064ae8b3361f4c237e5731 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731 2024-11-11T12:43:05,688 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:05,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,689 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
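The RegionTooBusyException entries above come from HRegion.checkResources, which rejects new mutations once a region's memstore passes its blocking threshold so that the in-flight flush (pid=103, repeatedly logging "NOT flushing ... as already flushing") can catch up. That threshold is derived from the per-region flush size and a block multiplier. A minimal sketch of how the limit is computed from configuration is below; the key names are the standard HBase ones, the defaults shown are the usual production values, and the 512.0 K limit in this log is a deliberately small test-specific setting rather than anything derived here.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Per-region flush size; 128 MB is the usual default. The test run above
    // clearly uses a much smaller value, which is why it blocks at 512 K.
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);

    // Writes are rejected with RegionTooBusyException once the memstore grows
    // past roughly flushSize * blockMultiplier (the multiplier defaults to 4).
    long blockMultiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);

    long blockingLimit = flushSize * blockMultiplier;
    System.out.printf("writes block above ~%d bytes per region%n", blockingLimit);
  }
}
```

Raising the multiplier only delays blocking; sustained write pressure still needs the flush to complete, which is exactly what the repeated FlushRegionCallable attempts above are waiting on.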
2024-11-11T12:43:05,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:05,713 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:43:05,719 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/948b88661a54467cabb435c9fd7ecde8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8 2024-11-11T12:43:05,730 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:43:05,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1126ms, sequenceid=15, compaction requested=false 2024-11-11T12:43:05,732 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:05,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:05,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=167.72 KB heapSize=440.20 KB 2024-11-11T12:43:05,760 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:05,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:05,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:05,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:05,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:05,761 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:05,770 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329045767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329045768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,771 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329045768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,772 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329045769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,773 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329045770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,809 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c277645f5f494bf19dd587ec27dffced_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328984640/Put/seqid=0 2024-11-11T12:43:05,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742175_1351 (size=12154) 2024-11-11T12:43:05,841 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,848 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:05,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:05,848 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:05,848 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,849 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:05,873 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329045872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329045873, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:05,879 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329045876, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,001 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,002 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:06,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,002 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,002 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:06,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,081 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329046079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,083 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329046082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,085 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329046084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,155 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,155 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,155 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,156 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,237 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:06,256 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c277645f5f494bf19dd587ec27dffced_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c277645f5f494bf19dd587ec27dffced_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:06,257 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/244408076476443c924d96038a770766, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:06,258 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/244408076476443c924d96038a770766 is 175, key is test_row_0/A:col10/1731328984640/Put/seqid=0 2024-11-11T12:43:06,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742176_1352 (size=30955) 2024-11-11T12:43:06,307 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,311 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:06,315 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,388 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329046388, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,393 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329046389, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,398 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329046396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,468 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,468 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,469 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,469 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,621 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,621 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,621 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,622 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,622 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:06,694 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=43, memsize=55.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/244408076476443c924d96038a770766 2024-11-11T12:43:06,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1d777de6bdec4b6388566065ca466a70 is 50, key is test_row_0/B:col10/1731328984640/Put/seqid=0 2024-11-11T12:43:06,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:06,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742177_1353 (size=12001) 2024-11-11T12:43:06,774 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,774 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,774 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,775 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:06,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,783 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329046780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,784 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329046780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,892 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329046890, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,899 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329046895, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,904 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:06,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329046903, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,928 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:06,932 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:06,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:06,932 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:06,932 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:06,933 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:06,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,087 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,087 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:07,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:07,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,088 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,088 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:07,104 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:43:07,109 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1d777de6bdec4b6388566065ca466a70 2024-11-11T12:43:07,131 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/f353a3a5bb674cf8ab5d355f8c1f81ff is 50, key is test_row_0/C:col10/1731328984640/Put/seqid=0 2024-11-11T12:43:07,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742178_1354 (size=12001) 2024-11-11T12:43:07,244 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,245 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:07,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:07,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:07,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,414 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,414 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,415 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,415 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,569 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,570 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=103 2024-11-11T12:43:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,570 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] handler.RSProcedureHandler(58): pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:07,570 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=103 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:07,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=103 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:07,574 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=55.91 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/f353a3a5bb674cf8ab5d355f8c1f81ff 2024-11-11T12:43:07,581 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/244408076476443c924d96038a770766 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766 2024-11-11T12:43:07,586 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766, entries=150, sequenceid=43, filesize=30.2 K 2024-11-11T12:43:07,587 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1d777de6bdec4b6388566065ca466a70 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70 2024-11-11T12:43:07,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70, entries=150, sequenceid=43, filesize=11.7 K 2024-11-11T12:43:07,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/f353a3a5bb674cf8ab5d355f8c1f81ff as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff 2024-11-11T12:43:07,598 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff, entries=150, sequenceid=43, filesize=11.7 K 2024-11-11T12:43:07,600 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~167.72 KB/171750, heapSize ~440.16 KB/450720, currentSize=33.54 KB/34350 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1840ms, sequenceid=43, compaction requested=false 2024-11-11T12:43:07,600 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:07,725 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,725 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=103 2024-11-11T12:43:07,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:07,726 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:07,726 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:07,745 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411117e3be542fc1249ba832008a44f20ccd2_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328985763/Put/seqid=0 2024-11-11T12:43:07,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742179_1355 (size=12154) 2024-11-11T12:43:07,751 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:07,755 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411117e3be542fc1249ba832008a44f20ccd2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117e3be542fc1249ba832008a44f20ccd2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:07,756 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/5dd93ba1f79846aa8ce1ef8fceeea0cd, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:07,757 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/5dd93ba1f79846aa8ce1ef8fceeea0cd is 175, key is test_row_0/A:col10/1731328985763/Put/seqid=0 2024-11-11T12:43:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742180_1356 (size=30955) 2024-11-11T12:43:07,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:07,909 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:07,972 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:07,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329047967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329047972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:07,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:07,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329047972, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,077 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329048074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,084 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329048083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,090 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329048089, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,180 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=51, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/5dd93ba1f79846aa8ce1ef8fceeea0cd 2024-11-11T12:43:08,198 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1f2203ef17f4480da1ad14db506efb47 is 50, key is test_row_0/B:col10/1731328985763/Put/seqid=0 2024-11-11T12:43:08,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742181_1357 (size=12001) 2024-11-11T12:43:08,272 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1f2203ef17f4480da1ad14db506efb47 2024-11-11T12:43:08,287 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/324460ff5be7429ca320b34ec9d68254 is 50, key is test_row_0/C:col10/1731328985763/Put/seqid=0 2024-11-11T12:43:08,287 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329048283, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329048289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329048293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742182_1358 (size=12001) 2024-11-11T12:43:08,321 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=51 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/324460ff5be7429ca320b34ec9d68254 2024-11-11T12:43:08,327 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/5dd93ba1f79846aa8ce1ef8fceeea0cd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd 2024-11-11T12:43:08,331 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd, entries=150, sequenceid=51, filesize=30.2 K 2024-11-11T12:43:08,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/1f2203ef17f4480da1ad14db506efb47 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47 2024-11-11T12:43:08,336 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47, entries=150, sequenceid=51, filesize=11.7 K 2024-11-11T12:43:08,337 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/324460ff5be7429ca320b34ec9d68254 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254 2024-11-11T12:43:08,342 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254, entries=150, sequenceid=51, filesize=11.7 K 2024-11-11T12:43:08,344 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=174.43 KB/178620 for 99827bdf8e81fc8bb34d29fe73f0a358 in 618ms, sequenceid=51, compaction requested=true 2024-11-11T12:43:08,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:08,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:08,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-11T12:43:08,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=103 2024-11-11T12:43:08,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=102 2024-11-11T12:43:08,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.7620 sec 2024-11-11T12:43:08,365 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=102, table=TestAcidGuarantees in 3.7690 sec 2024-11-11T12:43:08,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=187.85 KB heapSize=492.94 KB 2024-11-11T12:43:08,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:08,598 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:08,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:08,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:08,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:08,599 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:08,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:08,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329048603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,609 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329048607, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,610 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329048608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115e1eaf4d4e5c45baa7ddf824676b6b69_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:08,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742183_1359 (size=17034) 2024-11-11T12:43:08,634 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:08,640 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411115e1eaf4d4e5c45baa7ddf824676b6b69_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115e1eaf4d4e5c45baa7ddf824676b6b69_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:08,647 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/b2e73d4f27e7408192442ffefab9e554, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:08,648 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/b2e73d4f27e7408192442ffefab9e554 is 175, key is test_row_0/A:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:08,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44919 is added to blk_1073742184_1360 (size=48139) 2024-11-11T12:43:08,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=102 2024-11-11T12:43:08,711 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 102 completed 2024-11-11T12:43:08,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:08,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees 2024-11-11T12:43:08,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329048710, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-11T12:43:08,715 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329048711, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,716 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:08,718 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=104, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:08,718 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329048797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329048796, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,799 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4157 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:08,800 DEBUG [Thread-1564 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4159 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at 
org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:08,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-11T12:43:08,872 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,872 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] 
regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:08,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:08,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:08,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:08,873 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:08,873 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:08,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:08,919 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329048916, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:08,920 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:08,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329048917, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-11T12:43:09,031 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,032 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:09,032 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,032 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:09,033 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,074 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=82, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/b2e73d4f27e7408192442ffefab9e554 2024-11-11T12:43:09,110 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4f214173249243a6aeeff5debc473489 is 50, key is test_row_0/B:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:09,116 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:09,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329049111, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742185_1361 (size=12001) 2024-11-11T12:43:09,195 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,196 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,196 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:09,196 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,227 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:09,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329049224, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,230 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:09,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329049228, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-11T12:43:09,349 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,352 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:09,353 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:09,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,354 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:09,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,510 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:09,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:09,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] handler.RSProcedureHandler(58): pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=105 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=105 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:09,533 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4f214173249243a6aeeff5debc473489 2024-11-11T12:43:09,582 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/249666bb872b49d6a3555689be3011b6 is 50, key is test_row_0/C:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:09,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742186_1362 (size=12001) 2024-11-11T12:43:09,596 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=82 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/249666bb872b49d6a3555689be3011b6 2024-11-11T12:43:09,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/b2e73d4f27e7408192442ffefab9e554 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554 2024-11-11T12:43:09,612 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554, entries=250, sequenceid=82, filesize=47.0 K 2024-11-11T12:43:09,613 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4f214173249243a6aeeff5debc473489 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489 2024-11-11T12:43:09,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,617 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489, entries=150, sequenceid=82, filesize=11.7 K 2024-11-11T12:43:09,618 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/249666bb872b49d6a3555689be3011b6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6 2024-11-11T12:43:09,620 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:42421,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:09,622 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6, entries=150, sequenceid=82, filesize=11.7 K 2024-11-11T12:43:09,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=13.42 KB/13740 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1025ms, sequenceid=82, compaction requested=true 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:09,623 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:09,623 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:09,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:09,624 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 141004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:09,624 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:09,624 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:09,624 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:09,624 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,624 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:09,625 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=46.9 K 2024-11-11T12:43:09,625 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=137.7 K 2024-11-11T12:43:09,625 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554] 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 550498b38c064ae8b3361f4c237e5731, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328984592 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e11dde57a90a45c8810b69e350c75929, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328984592 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d777de6bdec4b6388566065ca466a70, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731328984639 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 244408076476443c924d96038a770766, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731328984639 2024-11-11T12:43:09,625 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1f2203ef17f4480da1ad14db506efb47, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731328985763 2024-11-11T12:43:09,626 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5dd93ba1f79846aa8ce1ef8fceeea0cd, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=51, earliestPutTs=1731328985763 2024-11-11T12:43:09,626 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4f214173249243a6aeeff5debc473489, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987959 2024-11-11T12:43:09,626 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b2e73d4f27e7408192442ffefab9e554, keycount=250, bloomtype=ROW, size=47.0 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987956 2024-11-11T12:43:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,662 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,665 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111f6d8d894f90a4d1eb9c89e55c52a6c8d_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:09,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,666 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:09,667 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=105 2024-11-11T12:43:09,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(51): Starting region operation on 
TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:09,667 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=13.42 KB heapSize=35.91 KB 2024-11-11T12:43:09,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:09,667 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:09,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:09,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:09,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:09,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:09,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,669 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#309 average throughput is 1.09 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:09,669 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/66f34f4b4b0144d0b192d38cbe7b953c is 50, key is test_row_0/B:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,674 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111f6d8d894f90a4d1eb9c89e55c52a6c8d_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:09,674 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111f6d8d894f90a4d1eb9c89e55c52a6c8d_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:09,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,680 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742187_1363 (size=12139) 2024-11-11T12:43:09,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,698 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ce24b40c10394451a8a66fefc5550633_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328988606/Put/seqid=0 2024-11-11T12:43:09,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,699 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742188_1364 (size=4469) 2024-11-11T12:43:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[identical StoreFileTrackerFactory(122) DEBUG entries from handlers 0, 1 and 2 on port 44673 recur every 1-2 ms between 12:43:09,701 and 12:43:09,825; only the distinct entries in that window are kept below]
2024-11-11T12:43:09,702 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#308 average throughput is 0.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T12:43:09,707 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ce3a6b4507144d768fb0598965f34ea8 is 175, key is test_row_0/A:col10/1731328988593/Put/seqid=0
2024-11-11T12:43:09,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742190_1366 (size=31093)
2024-11-11T12:43:09,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742189_1365 (size=7274)
2024-11-11T12:43:09,787 INFO [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] io.ByteBuffAllocator(388): Pool already reached its max capacity : 186 and no free buffers now. Consider increasing the value for 'hbase.server.allocator.max.buffer.count' ?
2024-11-11T12:43:09,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] io.ByteBuffAllocator(388): Pool already reached its max capacity : 186 and no free buffers now. Consider increasing the value for 'hbase.server.allocator.max.buffer.count' ?
2024-11-11T12:43:09,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104
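The two io.ByteBuffAllocator(388) messages above point at a single tunable, 'hbase.server.allocator.max.buffer.count', whose name is quoted verbatim in the log. The following is a minimal sketch, not part of the captured run: it only shows where that property would be set programmatically through the standard HBase configuration API. The class name AllocatorTuningSketch and the value 512 are illustrative assumptions; on a real cluster the property would normally be raised in the region server's hbase-site.xml and picked up at process startup rather than set in code.

// Hypothetical sketch: where 'hbase.server.allocator.max.buffer.count' lives.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class AllocatorTuningSketch {
    public static void main(String[] args) {
        // HBaseConfiguration.create() loads hbase-default.xml and hbase-site.xml from the classpath.
        Configuration conf = HBaseConfiguration.create();
        // The log reports the pool capped at 186 buffers with none free; a larger cap is one knob.
        // 512 is an arbitrary example value, not a recommendation.
        conf.setInt("hbase.server.allocator.max.buffer.count", 512);
        System.out.println("max.buffer.count = " + conf.get("hbase.server.allocator.max.buffer.count"));
    }
}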
[the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry continues to repeat from handlers 0, 1 and 2 on port 44673 between 12:43:09,827 and 12:43:09,894; no other events occur in that window]
2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,899 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,911 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,954 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:09,974 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
as already flushing 2024-11-11T12:43:09,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,975 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,977 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,995 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:09,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:10,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:10,072 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329050070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,076 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,098 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/66f34f4b4b0144d0b192d38cbe7b953c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/66f34f4b4b0144d0b192d38cbe7b953c 2024-11-11T12:43:10,103 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into 66f34f4b4b0144d0b192d38cbe7b953c(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
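The RegionTooBusyException warnings above show writes being rejected while the region's memstore is over its 512.0 K blocking limit and a flush is still in progress. A minimal client-side sketch of backing off on that exception follows; it assumes the standard HBase Java client API, the table/row/column names are taken from the log, and the retry count and sleep are purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionRetryExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"));
          put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          int attempts = 0;
          while (true) {
            try {
              // The put is rejected while the memstore is over its blocking limit;
              // depending on client retry settings the exception may instead arrive
              // wrapped in a RetriesExhaustedWithDetailsException.
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (++attempts > 5) {
                throw e;                     // give up after a few tries (illustrative)
              }
              Thread.sleep(100L * attempts); // simple linear backoff (illustrative)
            }
          }
        }
      }
    }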
2024-11-11T12:43:10,103 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:10,103 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=12, startTime=1731328989623; duration=0sec 2024-11-11T12:43:10,103 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:10,103 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:10,103 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:10,109 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48004 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:10,109 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:10,109 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:10,109 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=46.9 K 2024-11-11T12:43:10,109 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 948b88661a54467cabb435c9fd7ecde8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731328984592 2024-11-11T12:43:10,110 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f353a3a5bb674cf8ab5d355f8c1f81ff, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1731328984639 2024-11-11T12:43:10,110 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 324460ff5be7429ca320b34ec9d68254, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=51, earliestPutTs=1731328985763 2024-11-11T12:43:10,110 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 249666bb872b49d6a3555689be3011b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987959 2024-11-11T12:43:10,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:10,127 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ce3a6b4507144d768fb0598965f34ea8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8 2024-11-11T12:43:10,129 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#311 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:10,129 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ce24b40c10394451a8a66fefc5550633_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ce24b40c10394451a8a66fefc5550633_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:10,129 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/ba7fb8a85cda493f942ed77b87ec7b62 is 50, key is test_row_0/C:col10/1731328988593/Put/seqid=0 2024-11-11T12:43:10,130 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,130 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c4c849d3afe742bda1cc7145dc8338f5, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:10,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329050126, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c4c849d3afe742bda1cc7145dc8338f5 is 175, key is test_row_0/A:col10/1731328988606/Put/seqid=0 2024-11-11T12:43:10,133 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into ce3a6b4507144d768fb0598965f34ea8(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
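The "3 in ratio" wording in the compaction selection above refers to the exploring policy's size-ratio test: a candidate set is only kept when no file is disproportionately larger than the rest. A hedged sketch of that check follows; the method name and the 1.2 default ratio are assumptions for illustration, and the real policy applies further limits (min/max file counts, off-peak ratio, total size).

    // Sketch of the size-ratio check: each file must be no larger than the sum of
    // the other candidates multiplied by the configured ratio.
    static boolean filesInRatio(long[] fileSizes, double ratio) {
      long total = 0;
      for (long size : fileSizes) {
        total += size;
      }
      for (long size : fileSizes) {
        if (size > (total - size) * ratio) {
          return false; // one file dwarfs the rest; reject this candidate set
        }
      }
      return true;
    }

    // The four C-store files above are ~11.7 K each (48004 bytes in total), so with
    // a 1.2 ratio every file stays well under the combined size of the other three
    // and the whole set qualifies for the minor compaction reported in the log.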
2024-11-11T12:43:10,133 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:10,133 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=12, startTime=1731328989623; duration=0sec 2024-11-11T12:43:10,133 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:10,133 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742192_1368 (size=13765) 2024-11-11T12:43:10,140 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=87, memsize=4.5 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c4c849d3afe742bda1cc7145dc8338f5 2024-11-11T12:43:10,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742191_1367 (size=12139) 2024-11-11T12:43:10,155 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bd5567cb4d3a432cbcd373cba4c11f70 is 50, key is test_row_0/B:col10/1731328988606/Put/seqid=0 2024-11-11T12:43:10,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742193_1369 (size=7315) 2024-11-11T12:43:10,179 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bd5567cb4d3a432cbcd373cba4c11f70 2024-11-11T12:43:10,179 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329050174, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,181 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050177, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/76ef4c3e075348098c6557651a470f9b is 50, key is test_row_0/C:col10/1731328988606/Put/seqid=0 2024-11-11T12:43:10,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742194_1370 (size=7315) 2024-11-11T12:43:10,239 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.47 KB at sequenceid=87 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/76ef4c3e075348098c6557651a470f9b 2024-11-11T12:43:10,246 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c4c849d3afe742bda1cc7145dc8338f5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5 2024-11-11T12:43:10,252 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5, entries=50, sequenceid=87, filesize=13.4 K 2024-11-11T12:43:10,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bd5567cb4d3a432cbcd373cba4c11f70 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70 2024-11-11T12:43:10,265 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 
{event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70, entries=50, sequenceid=87, filesize=7.1 K 2024-11-11T12:43:10,266 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/76ef4c3e075348098c6557651a470f9b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b 2024-11-11T12:43:10,270 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b, entries=50, sequenceid=87, filesize=7.1 K 2024-11-11T12:43:10,271 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(3040): Finished flush of dataSize ~13.42 KB/13740, heapSize ~35.86 KB/36720, currentSize=187.85 KB/192360 for 99827bdf8e81fc8bb34d29fe73f0a358 in 604ms, sequenceid=87, compaction requested=false 2024-11-11T12:43:10,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:10,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
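The flush that just completed ran as pid=105 under the table-level procedure whose completion is reported below. As a rough sketch, a table flush of this kind can be requested through the public Admin API; the connection setup is assumed, and the procedure ids seen in the log are assigned by the master, not by the client.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Requests a flush of every region of the table; the pid=104
          // FlushTableProcedure / pid=105 FlushRegionProcedure entries in the
          // surrounding log are the server-side counterparts of such a request.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }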
2024-11-11T12:43:10,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-11T12:43:10,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-11-11T12:43:10,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-11-11T12:43:10,280 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=104, table=TestAcidGuarantees in 1.5650 sec 2024-11-11T12:43:10,283 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5570 sec 2024-11-11T12:43:10,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:10,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=194.56 KB heapSize=510.52 KB 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:10,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:10,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329050384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,390 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c26c0c88dc2a44429bea0de8a5fcf1a2_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:10,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742195_1371 (size=14594) 2024-11-11T12:43:10,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,493 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329050492, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,566 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/ba7fb8a85cda493f942ed77b87ec7b62 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ba7fb8a85cda493f942ed77b87ec7b62 2024-11-11T12:43:10,580 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into ba7fb8a85cda493f942ed77b87ec7b62(size=11.9 K), total size for store is 19.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:10,580 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:10,580 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=12, startTime=1731328989623; duration=0sec 2024-11-11T12:43:10,580 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:10,580 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:10,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050694, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,698 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:10,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329050696, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,808 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:10,812 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c26c0c88dc2a44429bea0de8a5fcf1a2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c26c0c88dc2a44429bea0de8a5fcf1a2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:10,813 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/575fec6ad1ab4f7f80ac076599e665bb, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:10,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/575fec6ad1ab4f7f80ac076599e665bb is 175, key is test_row_0/A:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:10,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742196_1372 (size=39549) 2024-11-11T12:43:10,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-11-11T12:43:10,823 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed 2024-11-11T12:43:10,825 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:10,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=106, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees 2024-11-11T12:43:10,826 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, 
state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:10,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-11T12:43:10,827 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=106, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:10,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=107, ppid=106, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:10,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-11T12:43:10,980 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:10,980 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-11T12:43:10,980 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:10,981 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
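The FlushTableProcedure/FlushRegionProcedure pair above (pid=106/107) is the master-side half of a client-requested flush — the same path that produced "Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 104 completed" a few lines earlier. A minimal sketch of that client call, assuming a Configuration pointing at this mini-cluster:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Waits for the master's FlushTableProcedure and its per-region
          // subprocedures, matching "Operation: FLUSH ... completed" above.
          admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

The pid=107 attempt fails with "Unable to complete flush ... as already flushing" because MemStoreFlusher.0 already owns a flush of this region; the master simply reports the remote failure and re-dispatches the procedure, as the following entries show.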
2024-11-11T12:43:10,981 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:10,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:10,999 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329050998, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,000 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:11,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329051000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-11T12:43:11,133 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-11T12:43:11,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,133 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,134 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] handler.RSProcedureHandler(58): pid=107 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
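The 512.0 K figure in these warnings is the region's blocking memstore size: the configured memstore flush size multiplied by hbase.hregion.memstore.block.multiplier (default 4), which suggests the test runs with a flush size of roughly 128 K rather than the 128 MB production default. A hypothetical configuration sketch of those two knobs (illustrative values, not the test's actual settings):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimit {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Illustrative values only: a 128 K flush size with the default
        // multiplier of 4 yields the 512 K blocking limit seen in the log.
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
        conf.setLong("hbase.hregion.memstore.block.multiplier", 4);
        long blocking = conf.getLong("hbase.hregion.memstore.flush.size", 0)
            * conf.getLong("hbase.hregion.memstore.block.multiplier", 4);
        System.out.println("writes block above " + blocking / 1024 + " K"); // 512 K
      }
    }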
2024-11-11T12:43:11,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=107 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:11,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=107 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:11,219 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=121, memsize=64.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/575fec6ad1ab4f7f80ac076599e665bb 2024-11-11T12:43:11,230 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/694b27b0b3d74dd39f2393a2946381a7 is 50, key is test_row_0/B:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:11,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742197_1373 (size=12001) 2024-11-11T12:43:11,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/694b27b0b3d74dd39f2393a2946381a7 2024-11-11T12:43:11,246 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 is 50, key is test_row_0/C:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:11,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742198_1374 (size=12001) 2024-11-11T12:43:11,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=64.85 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 2024-11-11T12:43:11,266 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/575fec6ad1ab4f7f80ac076599e665bb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb 2024-11-11T12:43:11,270 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb, entries=200, sequenceid=121, filesize=38.6 K 2024-11-11T12:43:11,271 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/694b27b0b3d74dd39f2393a2946381a7 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7 2024-11-11T12:43:11,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,274 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7, entries=150, sequenceid=121, filesize=11.7 K 2024-11-11T12:43:11,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 2024-11-11T12:43:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,277 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,279 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73, 
entries=150, sequenceid=121, filesize=11.7 K 2024-11-11T12:43:11,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,280 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~194.56 KB/199230, heapSize ~510.47 KB/522720, currentSize=6.71 KB/6870 for 99827bdf8e81fc8bb34d29fe73f0a358 in 896ms, sequenceid=121, compaction requested=true 2024-11-11T12:43:11,280 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:11,280 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:11,281 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,281 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:11,281 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; 
Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:11,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,282 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84407 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:11,282 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:11,282 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,282 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=82.4 K 2024-11-11T12:43:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,282 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,282 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb] 2024-11-11T12:43:11,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31455 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:11,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:11,283 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,283 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/66f34f4b4b0144d0b192d38cbe7b953c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=30.7 K 2024-11-11T12:43:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,283 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ce3a6b4507144d768fb0598965f34ea8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987959 2024-11-11T12:43:11,283 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 66f34f4b4b0144d0b192d38cbe7b953c, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987959 2024-11-11T12:43:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating 
StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,283 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,284 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c4c849d3afe742bda1cc7145dc8338f5, keycount=50, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1731328988606 2024-11-11T12:43:11,284 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bd5567cb4d3a432cbcd373cba4c11f70, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1731328988606 2024-11-11T12:43:11,284 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 575fec6ad1ab4f7f80ac076599e665bb, keycount=200, bloomtype=ROW, size=38.6 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731328990054 2024-11-11T12:43:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,284 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 694b27b0b3d74dd39f2393a2946381a7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731328990054 2024-11-11T12:43:11,284 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,287 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,287 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,288 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,288 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=107 2024-11-11T12:43:11,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:11,288 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=6.71 KB heapSize=18.33 KB 2024-11-11T12:43:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:11,289 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,297 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,298 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,299 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#317 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,300 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/0920785fe52c4d8c8b7e822c003a4a4a is 50, key is test_row_0/B:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:11,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,302 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111197ba68f4021345338c38136962a8f5ef_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_1/A:col10/1731328990385/Put/seqid=0 2024-11-11T12:43:11,302 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111a541100d409249e28be2cff3ad423727_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,305 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111a541100d409249e28be2cff3ad423727_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,305 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a541100d409249e28be2cff3ad423727_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,305 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,306 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742200_1376 (size=7274) 2024-11-11T12:43:11,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742201_1377 (size=4469) 2024-11-11T12:43:11,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,309 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742199_1375 (size=12241) 2024-11-11T12:43:11,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,315 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111197ba68f4021345338c38136962a8f5ef_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111197ba68f4021345338c38136962a8f5ef_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,316 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e5cbf8b78d044e3dba29d31a58b55f17, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e5cbf8b78d044e3dba29d31a58b55f17 is 175, key is test_row_1/A:col10/1731328990385/Put/seqid=0 2024-11-11T12:43:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,317 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,321 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,330 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,352 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,352 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742202_1378 (size=13765) 2024-11-11T12:43:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,353 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=126, memsize=2.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e5cbf8b78d044e3dba29d31a58b55f17 2024-11-11T12:43:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/270debd3692442d5bade68f8336bf76a is 50, key is test_row_1/B:col10/1731328990385/Put/seqid=0 2024-11-11T12:43:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,370 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742203_1379 (size=7315) 2024-11-11T12:43:11,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,379 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/270debd3692442d5bade68f8336bf76a 2024-11-11T12:43:11,379 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,391 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3971f271b5de4aaa82ae066eebda12a7 is 50, key is test_row_1/C:col10/1731328990385/Put/seqid=0 2024-11-11T12:43:11,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,392 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,402 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742204_1380 (size=7315) 2024-11-11T12:43:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,404 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.24 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3971f271b5de4aaa82ae066eebda12a7 2024-11-11T12:43:11,404 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,405 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/e5cbf8b78d044e3dba29d31a58b55f17 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17 2024-11-11T12:43:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,413 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17, entries=50, sequenceid=126, filesize=13.4 K 2024-11-11T12:43:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,414 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/270debd3692442d5bade68f8336bf76a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a 2024-11-11T12:43:11,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,420 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a, entries=50, sequenceid=126, filesize=7.1 K 2024-11-11T12:43:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,420 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,420 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,421 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3971f271b5de4aaa82ae066eebda12a7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7 2024-11-11T12:43:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,421 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,426 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,429 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:43:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-11T12:43:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,432 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7, entries=50, sequenceid=126, filesize=7.1 K 2024-11-11T12:43:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,433 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(3040): Finished flush of dataSize ~6.71 KB/6870, heapSize ~18.28 KB/18720, currentSize=0 B/0 for 99827bdf8e81fc8bb34d29fe73f0a358 in 144ms, sequenceid=126, compaction requested=true 2024-11-11T12:43:11,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:11,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=107}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=107 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=107 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,435 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=107, resume processing ppid=106 2024-11-11T12:43:11,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, ppid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 607 msec 2024-11-11T12:43:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=106, table=TestAcidGuarantees in 610 msec 2024-11-11T12:43:11,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [previous DEBUG entry repeated continuously by RpcServer.default.FPBQ.Fifo handlers 0-2 on port 44673 from 2024-11-11T12:43:11,462 through 2024-11-11T12:43:11,541] 2024-11-11T12:43:11,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same DEBUG message — storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker — repeats continuously from RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=44673) between 2024-11-11T12:43:11,602 and 2024-11-11T12:43:11,699 ...]
2024-11-11T12:43:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,713 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#318 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:11,714 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/f4649293a31143369b0864c5962283de is 175, key is test_row_0/A:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:11,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,719 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,720 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/0920785fe52c4d8c8b7e822c003a4a4a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/0920785fe52c4d8c8b7e822c003a4a4a 2024-11-11T12:43:11,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,724 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,729 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,735 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742205_1381 (size=31195) 2024-11-11T12:43:11,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,737 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into 0920785fe52c4d8c8b7e822c003a4a4a(size=12.0 K), total size for store is 19.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:11,737 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:11,737 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731328991281; duration=0sec 2024-11-11T12:43:11,737 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:11,737 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:11,737 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:11,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,742 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 38770 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,742 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:11,742 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:11,742 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ba7fb8a85cda493f942ed77b87ec7b62, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=37.9 K 2024-11-11T12:43:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,743 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ba7fb8a85cda493f942ed77b87ec7b62, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731328987959 2024-11-11T12:43:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,743 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 76ef4c3e075348098c6557651a470f9b, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=87, earliestPutTs=1731328988606 2024-11-11T12:43:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,744 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8ca5b1d51e7f4fabaa1a1252a2f79d73, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, 
compression=NONE, seqNum=121, earliestPutTs=1731328990054 2024-11-11T12:43:11,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,744 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3971f271b5de4aaa82ae066eebda12a7, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731328990385 2024-11-11T12:43:11,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,748 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/f4649293a31143369b0864c5962283de as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de 2024-11-11T12:43:11,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,754 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,754 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into f4649293a31143369b0864c5962283de(size=30.5 K), total size for store is 43.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:11,754 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:11,755 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731328991280; duration=0sec 2024-11-11T12:43:11,755 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:11,755 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,774 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#322 average 
throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:11,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,775 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8e1505516907497db5dfdf18e33ed715 is 50, key is test_row_0/C:col10/1731328990054/Put/seqid=0 2024-11-11T12:43:11,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742206_1382 (size=12275) 2024-11-11T12:43:11,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,782 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,786 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,790 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,792 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,793 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,795 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,796 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8e1505516907497db5dfdf18e33ed715 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e1505516907497db5dfdf18e33ed715 2024-11-11T12:43:11,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,798 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,802 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into 8e1505516907497db5dfdf18e33ed715(size=12.0 K), total size for store is 12.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
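[Editor's note] The two entries just above close out a compaction cycle: the temporary file under .tmp/C is committed into the C store directory, and HStore reports that four store files were rewritten into a single 12.0 K file. The surrounding StoreFileTrackerFactory lines simply show each store access resolving the configured tracker class (here the default implementation). As a purely illustrative sketch, not something issued during this test run (the selection here came from the region server's own longCompactions thread), an equivalent major compaction could be requested through the public HBase Admin API roughly like this, with connection settings assumed to come from the default client configuration:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class CompactTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Asks the region servers to rewrite each store's files into a single
          // file per store, the same end state the log reports for family C above.
          admin.majorCompact(TableName.valueOf("TestAcidGuarantees"));
        }
      }
    }

Whether such a request actually rewrites anything depends on the store's current file set; it only schedules the work that the CompactSplit runner above then executes.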
2024-11-11T12:43:11,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:11,802 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=12, startTime=1731328991281; duration=0sec 2024-11-11T12:43:11,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:11,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:11,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:11,847 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:11,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
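[Editor's note] At this point the region server has been asked to flush region 99827bdf8e81fc8bb34d29fe73f0a358, and MemStoreFlusher reports about 53.67 KB of pending data across the three column families A, B and C. The TestAcidGuarantees workload that produces this state is, in essence, concurrent Puts against rows named test_row_N. A stand-alone approximation of one such write followed by an explicit flush is sketched below; the table, row, family and qualifier names are taken from the log, everything else (value, connection settings) is assumed:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteAndFlush {
      public static void main(String[] args) throws Exception {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(name);
             Admin admin = conn.getAdmin()) {
          // One write touching all three families, mirroring the A/B/C stores in the log.
          Put put = new Put(Bytes.toBytes("test_row_0"));
          for (String family : new String[] {"A", "B", "C"}) {
            put.addColumn(Bytes.toBytes(family), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          }
          table.put(put);
          // Asks the master to flush the table's memstores to store files, the
          // operation the FlushTableProcedure entries further below record.
          admin.flush(name);
        }
      }
    }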
2024-11-11T12:43:11,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:11,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,848 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:11,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411116387cad272a845139578f10484637705_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:11,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,865 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,873 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742207_1383 (size=14794) 2024-11-11T12:43:11,891 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:11,897 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411116387cad272a845139578f10484637705_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116387cad272a845139578f10484637705_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:11,898 DEBUG [MemStoreFlusher.0 {}] 
mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d0a71c153c62478fbe9d2afbec6cb96b, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:11,898 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d0a71c153c62478fbe9d2afbec6cb96b is 175, key is test_row_0/A:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:11,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742208_1384 (size=39749) 2024-11-11T12:43:11,912 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=140, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d0a71c153c62478fbe9d2afbec6cb96b 2024-11-11T12:43:11,921 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/212cf74028e44fca91870f4ef64ea34f is 50, key is test_row_0/B:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:11,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=106 2024-11-11T12:43:11,931 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 106 completed 2024-11-11T12:43:11,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:11,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees 2024-11-11T12:43:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:11,935 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:11,935 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=108, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:11,935 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:11,944 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:11,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329051940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:11,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329051940, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:11,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742209_1385 (size=12151) 2024-11-11T12:43:11,951 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/212cf74028e44fca91870f4ef64ea34f 2024-11-11T12:43:11,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/a7b869c6ee3b4e5bb53453e4b7fb0813 is 50, key is test_row_0/C:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:12,004 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742210_1386 (size=12151) 2024-11-11T12:43:12,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:12,047 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329052047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,048 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329052047, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,088 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,088 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-11T12:43:12,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:12,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:12,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,089 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:12,089 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
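The pid=108/109 exchange above is a client-requested table flush colliding with the flush that MemStoreFlusher.0 is already running: the master's FlushTableProcedure dispatches a FlushRegionCallable to the region server, the callable declines ("NOT flushing ... as already flushing") and reports an IOException back, and the master keeps re-dispatching it until the in-progress flush finishes. The request itself corresponds to an ordinary admin flush call; a minimal sketch, assuming default connection settings and the table name taken from the log:

```java
// Minimal sketch of the client call behind the FLUSH procedures seen above
// (pid=106/108); connection settings are illustrative assumptions.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure completes; while the region
      // is already flushing, the region-side callable fails and the master
      // retries it, which is the repeated pid=109 churn in the log.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```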
2024-11-11T12:43:12,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:12,153 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329052150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,154 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4182 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:12,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:12,241 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,242 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-11T12:43:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 
{event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,242 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:12,242 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
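The Thread-1568 retry trace further up is the client half of the busy-region exchange: the test's writer thread calls HTable.put, the server rejects the mutation with RegionTooBusyException while the region's memstore is over its 512 K blocking limit, and RpcRetryingCallerImpl backs off and retries (tries=6 of retries=16 at that point). A minimal, self-contained sketch of that call path follows; the row key, value, and retry setting are illustrative, and only the table, family, and qualifier names come from the log:

```java
// Minimal sketch of the client-side put path seen in the retry trace above.
// Row key, value, and retry budget are illustrative assumptions.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionPut {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Client retry budget (illustrative); the trace above reports tries=6, retries=16.
    conf.setInt("hbase.client.retries.number", 15);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_1"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("some value"));
      // put() retries internally; RegionTooBusyException ("Over memstore limit")
      // is retried with backoff until the flush above frees memstore space.
      table.put(put);
    } catch (IOException e) {
      // Surfaces only once the retry budget is exhausted.
      System.err.println("put failed after retries: " + e.getMessage());
    }
  }
}
```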
2024-11-11T12:43:12,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:12,251 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329052248, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,252 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329052250, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,394 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,395 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-11T12:43:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,395 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] handler.RSProcedureHandler(58): pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:12,395 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=109 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:12,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=109 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:12,403 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/a7b869c6ee3b4e5bb53453e4b7fb0813 2024-11-11T12:43:12,410 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d0a71c153c62478fbe9d2afbec6cb96b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b 2024-11-11T12:43:12,420 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b, entries=200, sequenceid=140, filesize=38.8 K 2024-11-11T12:43:12,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/212cf74028e44fca91870f4ef64ea34f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f 2024-11-11T12:43:12,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f, entries=150, sequenceid=140, filesize=11.9 K 2024-11-11T12:43:12,434 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/a7b869c6ee3b4e5bb53453e4b7fb0813 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813 2024-11-11T12:43:12,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813, entries=150, sequenceid=140, filesize=11.9 K 2024-11-11T12:43:12,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 99827bdf8e81fc8bb34d29fe73f0a358 in 596ms, sequenceid=140, compaction requested=true 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:12,444 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:12,444 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:12,444 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:12,449 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 31707 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:12,449 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:12,449 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,449 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/0920785fe52c4d8c8b7e822c003a4a4a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=31.0 K 2024-11-11T12:43:12,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84709 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:12,449 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:12,449 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:12,450 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=82.7 K 2024-11-11T12:43:12,450 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0920785fe52c4d8c8b7e822c003a4a4a, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731328990054 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b] 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f4649293a31143369b0864c5962283de, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1731328990054 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 270debd3692442d5bade68f8336bf76a, keycount=50, bloomtype=ROW, size=7.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731328990385 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting e5cbf8b78d044e3dba29d31a58b55f17, keycount=50, bloomtype=ROW, size=13.4 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731328990385 2024-11-11T12:43:12,450 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 212cf74028e44fca91870f4ef64ea34f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1731328991704 2024-11-11T12:43:12,452 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0a71c153c62478fbe9d2afbec6cb96b, keycount=200, bloomtype=ROW, size=38.8 K, 
encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1731328991704 2024-11-11T12:43:12,465 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#326 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:12,466 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:12,466 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/b35e5c07e1d54864b289c294ce9352bb is 50, key is test_row_0/B:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:12,471 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111e3a26678899b482faa0a4a18acd712af_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:12,473 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111e3a26678899b482faa0a4a18acd712af_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:12,473 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e3a26678899b482faa0a4a18acd712af_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:12,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742211_1387 (size=12493) 2024-11-11T12:43:12,483 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/b35e5c07e1d54864b289c294ce9352bb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/b35e5c07e1d54864b289c294ce9352bb 2024-11-11T12:43:12,491 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into b35e5c07e1d54864b289c294ce9352bb(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
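The MOB writer that is created and then aborted above (DefaultMobStoreCompactor: "there are no MOB cells") only appears because column family A is MOB-enabled. The table definition itself is not part of this log, so the following is only a minimal sketch of how such a family is typically declared with the public client API; the table name matches the log, but the MOB threshold and the act of creating the table here are illustrative assumptions, not values taken from the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A MOB-enabled family stores values above the threshold as separate MOB files
      // (under /mobdir), which is why compactions for family A above go through
      // DefaultMobStoreCompactor instead of the default compactor.
      ColumnFamilyDescriptorBuilder familyA =
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
              .setMobEnabled(true)
              .setMobThreshold(100L); // bytes; illustrative value, not from the log
      admin.createTable(
          TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
              .setColumnFamily(familyA.build())
              .build());
    }
  }
}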
2024-11-11T12:43:12,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:12,491 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731328992444; duration=0sec 2024-11-11T12:43:12,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:12,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:12,491 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:43:12,492 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:43:12,493 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:43:12,493 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. because compaction request was cancelled 2024-11-11T12:43:12,493 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:12,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742212_1388 (size=4469) 2024-11-11T12:43:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:12,548 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,550 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=109 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
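The selection messages above ("2 eligible, 16 blocking", "Need 3 to initiate") reflect the stock store-file thresholds: by default a minor compaction needs at least 3 eligible files, and a store with 16 files starts blocking further flushes. If a test needs different thresholds, the usual knobs are set on the configuration before the mini-cluster is started; a minimal sketch with the default values follows (the values shown are the documented defaults, not settings read from this run).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tunedConf() {
    Configuration conf = HBaseConfiguration.create();
    // Minimum number of eligible store files before a minor compaction is initiated
    // (the "Need 3 to initiate" message above corresponds to this default of 3).
    conf.setInt("hbase.hstore.compaction.min", 3);
    // Upper bound on the number of files merged in a single compaction.
    conf.setInt("hbase.hstore.compaction.max", 10);
    // Store-file count at which updates are blocked until compaction catches up
    // (the "16 blocking" figure above is this default).
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);
    return conf;
  }
}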
2024-11-11T12:43:12,550 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:12,550 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:12,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:12,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:12,567 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111789822a902594134bb1c36692d16109e_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328991938/Put/seqid=0 2024-11-11T12:43:12,580 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329052577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,584 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329052580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742213_1389 (size=12304) 2024-11-11T12:43:12,682 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329052681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329052685, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,810 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329052809, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,811 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8170 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at 
org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at 
org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:12,819 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329052819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,820 DEBUG [Thread-1564 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=7, retries=16, started=8179 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at 
org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:12,887 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329052885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,893 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:12,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329052889, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:12,896 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#327 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:12,897 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc is 175, key is test_row_0/A:col10/1731328991845/Put/seqid=0 2024-11-11T12:43:12,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742214_1390 (size=31447) 2024-11-11T12:43:12,989 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:12,993 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111789822a902594134bb1c36692d16109e_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111789822a902594134bb1c36692d16109e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:12,993 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ece9e00889fc43799295c7c33a42211c, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:12,994 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ece9e00889fc43799295c7c33a42211c is 175, key is test_row_0/A:col10/1731328991938/Put/seqid=0 2024-11-11T12:43:12,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742215_1391 (size=31105) 2024-11-11T12:43:13,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:13,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:13,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329053190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:13,197 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:13,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329053194, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:13,312 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc 2024-11-11T12:43:13,322 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into 9bcd88e2c7034ad8ba4fb7d0cf290cfc(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
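The repeated RegionTooBusyException warnings above are thrown by HRegion.checkResources, which rejects writers once a region's combined memstore exceeds its blocking limit, i.e. the memstore flush size multiplied by the block multiplier. The 512 K limit seen here implies this test runs with a deliberately small flush size; the exact test settings are not visible in the log, so the figures below are only a sketch that reproduces that arithmetic under assumed values.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreLimitSketch {
  public static Configuration memstoreConf() {
    Configuration conf = HBaseConfiguration.create();
    // Per-region memstore size that triggers a flush. The production default is 128 MB;
    // a small value such as 128 KB is plausible for a unit test but is an assumption here.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    // Writers receive RegionTooBusyException once the memstore reaches
    // flush.size * block.multiplier (128 KB * 4 = 512 KB, matching the limit in the log).
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
    return conf;
  }
}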
2024-11-11T12:43:13,322 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:13,322 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731328992444; duration=0sec 2024-11-11T12:43:13,322 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:13,322 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:13,398 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=166, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ece9e00889fc43799295c7c33a42211c 2024-11-11T12:43:13,407 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/d8cdbae8c862489cb9304e7831a31a0f is 50, key is test_row_0/B:col10/1731328991938/Put/seqid=0 2024-11-11T12:43:13,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742216_1392 (size=12151) 2024-11-11T12:43:13,700 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:13,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329053697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:13,708 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:13,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329053704, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:13,822 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/d8cdbae8c862489cb9304e7831a31a0f 2024-11-11T12:43:13,835 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/140c7b9116f741048f700e0a89a88ff5 is 50, key is test_row_0/C:col10/1731328991938/Put/seqid=0 2024-11-11T12:43:13,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742217_1393 (size=12151) 2024-11-11T12:43:14,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:14,255 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=166 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/140c7b9116f741048f700e0a89a88ff5 2024-11-11T12:43:14,259 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/ece9e00889fc43799295c7c33a42211c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c 2024-11-11T12:43:14,263 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c, entries=150, sequenceid=166, filesize=30.4 K 2024-11-11T12:43:14,263 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/d8cdbae8c862489cb9304e7831a31a0f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f 2024-11-11T12:43:14,266 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f, entries=150, sequenceid=166, filesize=11.9 K 2024-11-11T12:43:14,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/140c7b9116f741048f700e0a89a88ff5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5 2024-11-11T12:43:14,270 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5, entries=150, sequenceid=166, filesize=11.9 K 2024-11-11T12:43:14,271 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1721ms, sequenceid=166, compaction requested=true 2024-11-11T12:43:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
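Note on the repeated RegionTooBusyException entries above: they are raised by HRegion.checkResources, which rejects writes while the region's memstore sits above its blocking threshold. The 512.0 K limit reported here is consistent with a test configuration that shrinks hbase.hregion.memstore.flush.size and keeps the default hbase.hregion.memstore.block.multiplier of 4, though the exact settings are not visible in this excerpt. Below is a minimal sketch naming those keys and showing a manual backoff on the exception; the property names are real HBase keys, but the values and the retry loop are illustrative assumptions, and the stock HBase client already retries RegionTooBusyException with its own pacing.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MemstoreLimitSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // These are server-side settings (normally in hbase-site.xml); they are set
    // here only to name the keys and show the arithmetic behind the 512 K limit:
    // 128 K flush size * block multiplier 4. The values are assumptions.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      // Hypothetical manual backoff; the client normally handles this retry itself.
      for (int attempt = 0; attempt < 5; attempt++) {
        try {
          table.put(put);
          break;
        } catch (RegionTooBusyException e) {
          Thread.sleep(100L * (attempt + 1));
        }
      }
    }
  }
}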
2024-11-11T12:43:14,271 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-11-11T12:43:14,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-11-11T12:43:14,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=108 2024-11-11T12:43:14,273 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3370 sec 2024-11-11T12:43:14,275 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=108, table=TestAcidGuarantees in 2.3410 sec 2024-11-11T12:43:14,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:14,715 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:14,716 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:14,743 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411116f3a8227bfea44679ce79ab2e5501dc3_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:14,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742218_1394 (size=12304) 2024-11-11T12:43:14,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:14,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329054824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:14,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:14,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329054825, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:14,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329054931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:14,934 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:14,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329054931, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,140 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329055136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,154 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329055151, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,167 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:15,173 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411116f3a8227bfea44679ce79ab2e5501dc3_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116f3a8227bfea44679ce79ab2e5501dc3_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:15,175 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/004d68e483ad41c49148899b6a8febac, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:15,176 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/004d68e483ad41c49148899b6a8febac is 175, key is test_row_0/A:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:15,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742219_1395 (size=31105) 2024-11-11T12:43:15,447 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,447 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329055444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,465 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329055463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,621 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=179, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/004d68e483ad41c49148899b6a8febac 2024-11-11T12:43:15,631 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/eadbf14c611e4d8e9299252f8a97270b is 50, key is test_row_0/B:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:15,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742220_1396 (size=12151) 2024-11-11T12:43:15,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/eadbf14c611e4d8e9299252f8a97270b 2024-11-11T12:43:15,652 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3b5f83300512427cb37bfee3d4792524 is 50, key is test_row_0/C:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:15,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742221_1397 (size=12151) 2024-11-11T12:43:15,953 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329055950, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:15,972 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:15,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329055969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=108 2024-11-11T12:43:16,042 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 108 completed 2024-11-11T12:43:16,061 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3b5f83300512427cb37bfee3d4792524 2024-11-11T12:43:16,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:16,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees 2024-11-11T12:43:16,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:16,073 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:16,074 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=110, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:16,074 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:16,083 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/004d68e483ad41c49148899b6a8febac as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac 2024-11-11T12:43:16,104 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac, entries=150, sequenceid=179, filesize=30.4 K 2024-11-11T12:43:16,105 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/eadbf14c611e4d8e9299252f8a97270b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b 2024-11-11T12:43:16,128 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b, entries=150, sequenceid=179, filesize=11.9 K 2024-11-11T12:43:16,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/3b5f83300512427cb37bfee3d4792524 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524 2024-11-11T12:43:16,140 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524, entries=150, sequenceid=179, filesize=11.9 K 2024-11-11T12:43:16,141 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1425ms, sequenceid=179, compaction requested=true 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:16,141 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction 
store size is 3 2024-11-11T12:43:16,141 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:16,143 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:16,148 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:16,148 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:16,148 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,149 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=91.5 K 2024-11-11T12:43:16,149 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,149 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac] 2024-11-11T12:43:16,152 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 9bcd88e2c7034ad8ba4fb7d0cf290cfc, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1731328991704 2024-11-11T12:43:16,152 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48728 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:16,152 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:16,152 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ece9e00889fc43799295c7c33a42211c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731328991929 2024-11-11T12:43:16,152 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,152 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e1505516907497db5dfdf18e33ed715, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=47.6 K 2024-11-11T12:43:16,156 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 004d68e483ad41c49148899b6a8febac, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:16,156 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e1505516907497db5dfdf18e33ed715, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1731328990054 2024-11-11T12:43:16,157 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a7b869c6ee3b4e5bb53453e4b7fb0813, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=140, earliestPutTs=1731328991704 2024-11-11T12:43:16,157 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 140c7b9116f741048f700e0a89a88ff5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731328991929 2024-11-11T12:43:16,158 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3b5f83300512427cb37bfee3d4792524, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:16,169 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:16,172 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#335 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:16,173 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/fe9783b7f541420ebe755db4f143a66d is 50, key is test_row_0/C:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:16,174 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111536ec222be4b431b8079593dd307142e_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:16,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:16,176 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111536ec222be4b431b8079593dd307142e_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:16,177 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111536ec222be4b431b8079593dd307142e_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:16,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:16,182 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 
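Note on the compaction selections above (3 A-store files totalling 93657 bytes on the short-compaction thread, 4 C-store files totalling 48728 bytes on the long-compaction thread): they come out of ExploringCompactionPolicy, whose core test is ratio based. Every file in a candidate set must be no larger than hbase.hstore.compaction.ratio (default 1.2) times the combined size of the other files in that set. The sketch below applies that check to sizes approximating the ones in this log; it is a simplified stand-in, since the real policy also enforces min/max file counts, considers off-peak ratios, and scores permutations before choosing one.

import java.util.List;

public class RatioCheckSketch {
  // Simplified version of the ratio test: each file in the candidate set must be
  // no larger than ratio * (sum of the sizes of the other files in the set).
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Approximate A-store HFile sizes; the log reports 30.7 K, 30.4 K and 30.4 K
    // totalling 93657 bytes.
    List<Long> aStore = List.of(31437L, 31110L, 31110L);
    // Approximate C-store HFile sizes; the log reports four ~12 K files totalling 48728 bytes.
    List<Long> cStore = List.of(12288L, 12150L, 12150L, 12150L);
    double ratio = 1.2; // default hbase.hstore.compaction.ratio
    System.out.println("A-store candidates in ratio: " + filesInRatio(aStore, ratio));
    System.out.println("C-store candidates in ratio: " + filesInRatio(cStore, ratio));
  }
}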
2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:16,183 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:16,199 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119d6a519395174d45a3e3f5c2a94c1696_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328994751/Put/seqid=0 2024-11-11T12:43:16,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742223_1399 (size=4469) 2024-11-11T12:43:16,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742222_1398 (size=12561) 2024-11-11T12:43:16,206 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#334 average throughput is 0.66 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:16,207 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/f0f7c412b6274afca4f780c1d6f8b2e5 is 175, key is test_row_0/A:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:16,227 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-11T12:43:16,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:16,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
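Note on the "NOT flushing ... as already flushing" lines above: they record the remote FlushRegionCallable for pid=111 arriving while MemStoreFlusher.0 is still writing the flush it started at 12:43:16,182. The callable bails out, the region server reports the failure, and the master re-dispatches the procedure (the ERROR and "Remote procedure failed, pid=111" entries that follow) until the in-flight flush finishes. The table-level flush itself is driven from the client through Admin.flush, as the HBaseAdmin$TableFuture and FlushTableProcedure entries earlier in the log show; a rough sketch is below, with the table name taken from this test and everything else an illustrative assumption rather than the test's actual harness code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Submits a flush for the table; in this log that request is stored as a
      // FlushTableProcedure on the master (pid=108/110), which fans out a
      // FlushRegionProcedure per region (pid=109/111) and retries a region whose
      // server answers that it is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}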
2024-11-11T12:43:16,228 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,229 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742224_1400 (size=14794) 2024-11-11T12:43:16,247 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329056239, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,247 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:16,252 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119d6a519395174d45a3e3f5c2a94c1696_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d6a519395174d45a3e3f5c2a94c1696_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:16,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742225_1401 (size=31549) 2024-11-11T12:43:16,256 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1bdc5966b88543a79879e43561117c35, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:16,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1bdc5966b88543a79879e43561117c35 is 175, key is test_row_0/A:col10/1731328994751/Put/seqid=0 2024-11-11T12:43:16,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742226_1402 (size=39749) 2024-11-11T12:43:16,355 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,355 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329056349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:16,380 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,381 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-11T12:43:16,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:16,381 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,381 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,382 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,533 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-11T12:43:16,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:16,534 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,534 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,562 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329056560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,614 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/fe9783b7f541420ebe755db4f143a66d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fe9783b7f541420ebe755db4f143a66d 2024-11-11T12:43:16,622 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into fe9783b7f541420ebe755db4f143a66d(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
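The RegionTooBusyException rejections above (callId 60, 62 and 64, all "Over memstore limit=512.0 K") are the region server pushing back on writers while the memstore drains. Below is a minimal sketch of the client side of such a write, using the standard HBase Java client against the table, row, family and qualifier named in this log; the explicit retry loop, the backoff values, the payload and the class name are illustrative assumptions only, since the stock client normally retries this exception internally with its own backoff.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionWriter {                     // illustrative class name, not from the test
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
                Put put = new Put(Bytes.toBytes("test_row_0"));
                put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
                long backoffMs = 100;                   // illustrative starting backoff
                for (int attempt = 0; attempt < 5; attempt++) {
                    try {
                        table.put(put);                 // server may reject with RegionTooBusyException
                        break;                          // accepted once the memstore has drained
                    } catch (RegionTooBusyException e) {
                        Thread.sleep(backoffMs);        // let the flush catch up, then retry
                        backoffMs *= 2;
                    }
                }
            }
        }
    }

Once the flush in progress above completes and the region's memstore falls back under the limit, a retried put of this shape goes through; the rejections in this log are transient backpressure, not data loss.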
2024-11-11T12:43:16,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:16,622 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=12, startTime=1731328996141; duration=0sec 2024-11-11T12:43:16,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:16,622 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:16,623 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:16,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:16,630 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:16,630 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,630 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/b35e5c07e1d54864b289c294ce9352bb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=35.9 K 2024-11-11T12:43:16,633 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b35e5c07e1d54864b289c294ce9352bb, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1731328991704 2024-11-11T12:43:16,634 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d8cdbae8c862489cb9304e7831a31a0f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=166, earliestPutTs=1731328991929 2024-11-11T12:43:16,634 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting eadbf14c611e4d8e9299252f8a97270b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:16,648 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#337 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:16,648 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/c3f8203c435646c19aff0a4d28b1676d is 50, key is test_row_0/B:col10/1731328992571/Put/seqid=0 2024-11-11T12:43:16,660 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/f0f7c412b6274afca4f780c1d6f8b2e5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5 2024-11-11T12:43:16,666 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into f0f7c412b6274afca4f780c1d6f8b2e5(size=30.8 K), total size for store is 30.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:16,666 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:16,666 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731328996141; duration=0sec 2024-11-11T12:43:16,666 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:16,666 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:16,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:16,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742227_1403 (size=12595) 2024-11-11T12:43:16,689 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=204, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1bdc5966b88543a79879e43561117c35 2024-11-11T12:43:16,691 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,692 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-11T12:43:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] 
regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:16,692 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,692 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] handler.RSProcedureHandler(58): pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:16,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=111 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:16,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=111 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
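The pid=111 back-and-forth above is the master repeatedly dispatching FlushRegionCallable while the region reports "already flushing"; each attempt fails with the wrapped IOException and is simply re-queued. A flush like this is typically requested through the Admin API, roughly as sketched below; the class name is invented for illustration, and the mapping to FlushTableProcedure pid=110 / FlushRegionProcedure pid=111 is inferred from this log rather than taken from the test's own source.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushRequester {                       // illustrative class name, not from the test
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the cluster to flush the table's memstores. In this log the
                // equivalent request surfaces as a master-side flush procedure
                // (pid=110) whose per-region child (pid=111) is retried while the
                // region is still busy with a flush of its own.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

The retries are harmless: once the in-progress flush on 99827bdf8e81fc8bb34d29fe73f0a358 finishes, the next dispatch of pid=111 succeeds, as the procedure completion further down in this log shows.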
2024-11-11T12:43:16,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8abaa8dea7344c81a0e84df827b0cb99 is 50, key is test_row_0/B:col10/1731328994751/Put/seqid=0 2024-11-11T12:43:16,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742228_1404 (size=12151) 2024-11-11T12:43:16,722 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8abaa8dea7344c81a0e84df827b0cb99 2024-11-11T12:43:16,733 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/07e370351fb748479519bee456f5a266 is 50, key is test_row_0/C:col10/1731328994751/Put/seqid=0 2024-11-11T12:43:16,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742229_1405 (size=12151) 2024-11-11T12:43:16,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=204 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/07e370351fb748479519bee456f5a266 2024-11-11T12:43:16,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1bdc5966b88543a79879e43561117c35 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35 2024-11-11T12:43:16,755 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35, entries=200, sequenceid=204, filesize=38.8 K 2024-11-11T12:43:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8abaa8dea7344c81a0e84df827b0cb99 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99 2024-11-11T12:43:16,760 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99, entries=150, sequenceid=204, filesize=11.9 K 2024-11-11T12:43:16,761 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/07e370351fb748479519bee456f5a266 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266 2024-11-11T12:43:16,765 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266, entries=150, sequenceid=204, filesize=11.9 K 2024-11-11T12:43:16,766 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 99827bdf8e81fc8bb34d29fe73f0a358 in 584ms, sequenceid=204, compaction requested=false 2024-11-11T12:43:16,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:16,845 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,846 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=111 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:16,846 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:16,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:16,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:16,860 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111f5dd0a4f4efb46dd9303ae2cbb229f9e_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328996217/Put/seqid=0 2024-11-11T12:43:16,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742230_1406 (size=12304) 2024-11-11T12:43:16,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:16,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:16,982 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329056975, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,983 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 135 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329056976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:16,987 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:16,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329056982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329057083, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,094 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/c3f8203c435646c19aff0a4d28b1676d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/c3f8203c435646c19aff0a4d28b1676d 2024-11-11T12:43:17,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329057088, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,098 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into c3f8203c435646c19aff0a4d28b1676d(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:17,098 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:17,098 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731328996141; duration=0sec 2024-11-11T12:43:17,099 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:17,099 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:17,180 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:17,270 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:17,275 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111f5dd0a4f4efb46dd9303ae2cbb229f9e_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111f5dd0a4f4efb46dd9303ae2cbb229f9e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:17,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1b7325cfd679428ebb39104eb566e505, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:17,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1b7325cfd679428ebb39104eb566e505 is 175, key is test_row_0/A:col10/1731328996217/Put/seqid=0 2024-11-11T12:43:17,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329057289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742231_1407 (size=31105) 2024-11-11T12:43:17,309 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329057305, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,310 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=217, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1b7325cfd679428ebb39104eb566e505 2024-11-11T12:43:17,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4de76553e9c14e0391d4fa78181f7b5a is 50, key is test_row_0/B:col10/1731328996217/Put/seqid=0 2024-11-11T12:43:17,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742232_1408 (size=12151) 2024-11-11T12:43:17,593 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329057591, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:17,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329057611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:17,741 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4de76553e9c14e0391d4fa78181f7b5a 2024-11-11T12:43:17,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/65b7669277a449eb8553e5c50218bc89 is 50, key is test_row_0/C:col10/1731328996217/Put/seqid=0 2024-11-11T12:43:17,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742233_1409 (size=12151) 2024-11-11T12:43:17,799 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=217 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/65b7669277a449eb8553e5c50218bc89 2024-11-11T12:43:17,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/1b7325cfd679428ebb39104eb566e505 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505 2024-11-11T12:43:17,809 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505, entries=150, sequenceid=217, filesize=30.4 K 2024-11-11T12:43:17,810 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/4de76553e9c14e0391d4fa78181f7b5a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a 2024-11-11T12:43:17,815 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a, entries=150, sequenceid=217, filesize=11.9 K 2024-11-11T12:43:17,816 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/65b7669277a449eb8553e5c50218bc89 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89 2024-11-11T12:43:17,820 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89, entries=150, sequenceid=217, filesize=11.9 K 2024-11-11T12:43:17,821 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 99827bdf8e81fc8bb34d29fe73f0a358 in 974ms, sequenceid=217, compaction requested=true 2024-11-11T12:43:17,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:17,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:17,821 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=111}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=111 2024-11-11T12:43:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=111 2024-11-11T12:43:17,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=111, resume processing ppid=110 2024-11-11T12:43:17,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.7480 sec 2024-11-11T12:43:17,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=110, table=TestAcidGuarantees in 1.7630 sec 2024-11-11T12:43:18,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:18,109 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:18,109 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,129 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b5ae1632ba6b499b9be6c4d044f5902b_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,152 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329058144, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,163 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329058152, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742234_1410 (size=12304) 2024-11-11T12:43:18,177 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:18,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-11-11T12:43:18,181 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 110 completed 2024-11-11T12:43:18,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:18,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees 2024-11-11T12:43:18,184 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:18,185 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=112, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:18,185 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:18,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:18,197 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b5ae1632ba6b499b9be6c4d044f5902b_99827bdf8e81fc8bb34d29fe73f0a358 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b5ae1632ba6b499b9be6c4d044f5902b_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:18,199 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/4975a4ed65ec4ea18c54c88686ab1972, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,200 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/4975a4ed65ec4ea18c54c88686ab1972 is 175, key is test_row_0/A:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742235_1411 (size=31105) 2024-11-11T12:43:18,259 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329058253, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,271 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329058264, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:18,338 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,338 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-11T12:43:18,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:18,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,340 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,340 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,471 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329058464, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,479 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329058473, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:18,504 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,512 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-11T12:43:18,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:18,512 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,512 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:18,513 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,652 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=244, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/4975a4ed65ec4ea18c54c88686ab1972 2024-11-11T12:43:18,662 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/3e61ac387dd040a8bec40ab9b7a14fc9 is 50, key is test_row_0/B:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742236_1412 (size=12151) 2024-11-11T12:43:18,674 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/3e61ac387dd040a8bec40ab9b7a14fc9 2024-11-11T12:43:18,674 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,674 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-11T12:43:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,675 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] handler.RSProcedureHandler(58): pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=113 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=113 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:18,690 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/fb742f2230ec4e66b0c5104593052ea9 is 50, key is test_row_0/C:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742237_1413 (size=12151) 2024-11-11T12:43:18,752 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=244 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/fb742f2230ec4e66b0c5104593052ea9 2024-11-11T12:43:18,766 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/4975a4ed65ec4ea18c54c88686ab1972 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972 2024-11-11T12:43:18,771 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972, entries=150, sequenceid=244, filesize=30.4 K 2024-11-11T12:43:18,772 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/3e61ac387dd040a8bec40ab9b7a14fc9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9 2024-11-11T12:43:18,777 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 101 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329058773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,783 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:18,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329058782, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,794 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9, entries=150, sequenceid=244, filesize=11.9 K 2024-11-11T12:43:18,796 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/fb742f2230ec4e66b0c5104593052ea9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9 2024-11-11T12:43:18,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:18,810 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9, entries=150, sequenceid=244, filesize=11.9 K 2024-11-11T12:43:18,816 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for 99827bdf8e81fc8bb34d29fe73f0a358 in 708ms, sequenceid=244, compaction requested=true 2024-11-11T12:43:18,816 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:18,817 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:18,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:18,817 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:18,818 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 133508 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:18,818 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:18,818 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,818 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=130.4 K 2024-11-11T12:43:18,818 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,819 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972] 2024-11-11T12:43:18,819 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49048 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:18,819 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:18,819 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,819 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/c3f8203c435646c19aff0a4d28b1676d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=47.9 K 2024-11-11T12:43:18,819 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f0f7c412b6274afca4f780c1d6f8b2e5, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:18,819 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c3f8203c435646c19aff0a4d28b1676d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:18,820 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1bdc5966b88543a79879e43561117c35, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731328994751 2024-11-11T12:43:18,820 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8abaa8dea7344c81a0e84df827b0cb99, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731328994751 2024-11-11T12:43:18,820 
DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1b7325cfd679428ebb39104eb566e505, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731328996217 2024-11-11T12:43:18,821 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4de76553e9c14e0391d4fa78181f7b5a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731328996217 2024-11-11T12:43:18,821 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4975a4ed65ec4ea18c54c88686ab1972, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:18,821 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e61ac387dd040a8bec40ab9b7a14fc9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:18,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:18,829 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=113 2024-11-11T12:43:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:18,832 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-11T12:43:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:18,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:18,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:18,834 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,843 INFO 
[RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#347 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:18,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/43b48fedfae54b04913cf5c4144b694d is 50, key is test_row_0/B:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,857 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411117558d043088d4379a35d81b2707a6913_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,860 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411117558d043088d4379a35d81b2707a6913_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,860 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411117558d043088d4379a35d81b2707a6913_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742239_1415 (size=4469) 2024-11-11T12:43:18,907 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111183b7cffea39447128d201e1c92383e53_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328998127/Put/seqid=0 2024-11-11T12:43:18,909 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742238_1414 (size=12731) 2024-11-11T12:43:18,914 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#346 average throughput is 0.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:18,915 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d8726f5b3f6d4fedbefb2578be663e8e is 175, key is test_row_0/A:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,929 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/43b48fedfae54b04913cf5c4144b694d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/43b48fedfae54b04913cf5c4144b694d 2024-11-11T12:43:18,939 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into 43b48fedfae54b04913cf5c4144b694d(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:18,939 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:18,939 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=12, startTime=1731328998817; duration=0sec 2024-11-11T12:43:18,939 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:18,939 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:18,939 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:18,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742240_1416 (size=12304) 2024-11-11T12:43:18,942 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49014 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:18,942 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:18,942 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
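The ExploringCompactionPolicy entries above report selecting 4 files after "considering 3 permutations with 3 in ratio": the selector enumerates contiguous windows of candidate store files, discards windows that violate a size-ratio rule, and keeps the best surviving window. The following is a minimal, self-contained Java sketch of that selection idea only; it is not HBase's implementation, and the file sizes, ratio, and min/max window bounds are illustrative assumptions loosely echoing the B-store files in the log.

import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch (not HBase source) of an "exploring" compaction selector:
 * enumerate contiguous windows of store-file sizes, keep only windows that
 * satisfy the size-ratio rule, and prefer the window with the most files,
 * breaking ties by smaller total size.
 */
public class ExploringSelectionSketch {

    // Ratio rule: every file in the window must be <= ratio * (sum of the others).
    static boolean filesInRatio(List<Long> window, double ratio) {
        long total = window.stream().mapToLong(Long::longValue).sum();
        for (long size : window) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    static List<Long> select(List<Long> files, int minFiles, int maxFiles, double ratio) {
        List<Long> best = new ArrayList<>();
        long bestSize = Long.MAX_VALUE;
        int permutations = 0;
        for (int start = 0; start < files.size(); start++) {
            for (int end = start + minFiles; end <= Math.min(files.size(), start + maxFiles); end++) {
                List<Long> window = files.subList(start, end);
                permutations++;
                if (!filesInRatio(window, ratio)) {
                    continue;
                }
                long size = window.stream().mapToLong(Long::longValue).sum();
                if (window.size() > best.size()
                        || (window.size() == best.size() && size < bestSize)) {
                    best = new ArrayList<>(window);
                    bestSize = size;
                }
            }
        }
        System.out.println("considered " + permutations + " permutations, selected "
                + best.size() + " files of size " + bestSize);
        return best;
    }

    public static void main(String[] args) {
        // Sizes loosely echo the four B-store files from the log (byte counts are approximate).
        List<Long> files = List.of(12_600L, 12_200L, 12_200L, 12_200L);
        select(files, 3, 10, 1.2);
    }
}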
2024-11-11T12:43:18,943 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fe9783b7f541420ebe755db4f143a66d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=47.9 K 2024-11-11T12:43:18,943 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fe9783b7f541420ebe755db4f143a66d, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731328992571 2024-11-11T12:43:18,943 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 07e370351fb748479519bee456f5a266, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=204, earliestPutTs=1731328994751 2024-11-11T12:43:18,943 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 65b7669277a449eb8553e5c50218bc89, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=217, earliestPutTs=1731328996217 2024-11-11T12:43:18,945 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting fb742f2230ec4e66b0c5104593052ea9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:18,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:18,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742241_1417 (size=31685) 2024-11-11T12:43:18,960 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111183b7cffea39447128d201e1c92383e53_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111183b7cffea39447128d201e1c92383e53_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:18,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/80465fde17d74958be3749c1a66af50c, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:18,962 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/80465fde17d74958be3749c1a66af50c is 175, key is test_row_0/A:col10/1731328998127/Put/seqid=0 2024-11-11T12:43:18,969 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#349 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:18,969 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/590e2f8a81e64058b4d98ca64b32008e is 50, key is test_row_0/C:col10/1731328996973/Put/seqid=0 2024-11-11T12:43:18,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742242_1418 (size=31105) 2024-11-11T12:43:18,974 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/80465fde17d74958be3749c1a66af50c 2024-11-11T12:43:18,988 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8be456b3f3124fe7a88727915bf086a3 is 50, key is test_row_0/B:col10/1731328998127/Put/seqid=0 2024-11-11T12:43:18,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742243_1419 (size=12697) 2024-11-11T12:43:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:18,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
as already flushing 2024-11-11T12:43:19,004 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/590e2f8a81e64058b4d98ca64b32008e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/590e2f8a81e64058b4d98ca64b32008e 2024-11-11T12:43:19,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742244_1420 (size=12151) 2024-11-11T12:43:19,008 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8be456b3f3124fe7a88727915bf086a3 2024-11-11T12:43:19,022 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into 590e2f8a81e64058b4d98ca64b32008e(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:19,022 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:19,022 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=12, startTime=1731328998817; duration=0sec 2024-11-11T12:43:19,022 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:19,022 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:19,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/09bce5af97354beca8649b8fd1b23eec is 50, key is test_row_0/C:col10/1731328998127/Put/seqid=0 2024-11-11T12:43:19,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742245_1421 (size=12151) 2024-11-11T12:43:19,112 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329059107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,225 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329059220, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,289 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329059285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329059289, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:19,369 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/d8726f5b3f6d4fedbefb2578be663e8e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e 2024-11-11T12:43:19,383 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into d8726f5b3f6d4fedbefb2578be663e8e(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:19,383 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:19,383 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=12, startTime=1731328998817; duration=0sec 2024-11-11T12:43:19,383 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:19,383 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:19,436 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329059432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,444 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/09bce5af97354beca8649b8fd1b23eec 2024-11-11T12:43:19,468 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/80465fde17d74958be3749c1a66af50c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c 2024-11-11T12:43:19,489 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c, entries=150, sequenceid=254, filesize=30.4 K 2024-11-11T12:43:19,492 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/8be456b3f3124fe7a88727915bf086a3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3 2024-11-11T12:43:19,498 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3, entries=150, sequenceid=254, filesize=11.9 K 2024-11-11T12:43:19,500 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/09bce5af97354beca8649b8fd1b23eec as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec 2024-11-11T12:43:19,504 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec, entries=150, sequenceid=254, filesize=11.9 K 2024-11-11T12:43:19,506 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for 99827bdf8e81fc8bb34d29fe73f0a358 in 674ms, sequenceid=254, compaction requested=false 2024-11-11T12:43:19,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:19,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
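The repeated RegionTooBusyException warnings in this stretch are the region server refusing writes while the region's memstore is over its blocking limit (512.0 K here); callers are expected to back off until a flush, such as the pid=113 flush that just finished above, drains the memstore. Below is a small client-side sketch of that backoff, assuming a standard HBase Connection/Table. Note that the real HBase client normally retries such exceptions internally (possibly surfacing them wrapped in a retries-exhausted exception), so the explicit catch here is only meaningful with client retries turned down, and the table name, retry count, and sleep are illustrative.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/**
 * Sketch of a writer that backs off when the server answers with
 * RegionTooBusyException (memstore over its blocking limit), as in the
 * WARN/DEBUG entries above. Retry count and sleep are illustrative choices.
 */
public class BackoffPutSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
            putWithBackoff(table, put, 5, 200L);
        }
    }

    static void putWithBackoff(Table table, Put put, int maxAttempts, long sleepMs)
            throws IOException, InterruptedException {
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                table.put(put);
                return;
            } catch (RegionTooBusyException e) {
                // Region is over its memstore limit; give the flush a chance to drain it.
                if (attempt == maxAttempts) {
                    throw e;
                }
                Thread.sleep(sleepMs * attempt);
            }
        }
    }
}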
2024-11-11T12:43:19,506 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=113}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=113 2024-11-11T12:43:19,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=113 2024-11-11T12:43:19,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-11-11T12:43:19,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3230 sec 2024-11-11T12:43:19,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=112, table=TestAcidGuarantees in 1.3280 sec 2024-11-11T12:43:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:19,740 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:19,740 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:19,747 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111798d319fd56e4f83913dbbe1df76ed61_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:19,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329059771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:19,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742246_1422 (size=14994) 2024-11-11T12:43:19,788 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:19,800 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111798d319fd56e4f83913dbbe1df76ed61_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111798d319fd56e4f83913dbbe1df76ed61_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:19,803 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8a92042a0ddb4485b05d357f4d65e72d, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:19,804 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8a92042a0ddb4485b05d357f4d65e72d is 175, key is test_row_0/A:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:19,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742247_1423 (size=39949) 2024-11-11T12:43:19,876 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:19,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 175 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329059872, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,081 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:20,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 177 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329060078, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,218 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=284, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8a92042a0ddb4485b05d357f4d65e72d 2024-11-11T12:43:20,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ae79f5b983874dc3a01aeb0b51509047 is 50, key is test_row_0/B:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:20,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742248_1424 (size=12301) 2024-11-11T12:43:20,245 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ae79f5b983874dc3a01aeb0b51509047 2024-11-11T12:43:20,256 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/657988a4c4d54b4f956aeeac925379cd is 50, key is test_row_0/C:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:20,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742249_1425 (size=12301) 2024-11-11T12:43:20,294 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=284 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/657988a4c4d54b4f956aeeac925379cd 2024-11-11T12:43:20,299 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:20,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329060293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8a92042a0ddb4485b05d357f4d65e72d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d 2024-11-11T12:43:20,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:20,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329060298, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=112 2024-11-11T12:43:20,304 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 112 completed 2024-11-11T12:43:20,304 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d, entries=200, sequenceid=284, filesize=39.0 K 2024-11-11T12:43:20,305 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ae79f5b983874dc3a01aeb0b51509047 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047 2024-11-11T12:43:20,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:20,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees 2024-11-11T12:43:20,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-11T12:43:20,320 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:20,321 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=114, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:20,321 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:20,324 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047, entries=150, sequenceid=284, filesize=12.0 K 2024-11-11T12:43:20,330 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/657988a4c4d54b4f956aeeac925379cd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd 2024-11-11T12:43:20,336 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd, entries=150, sequenceid=284, filesize=12.0 K 2024-11-11T12:43:20,337 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for 99827bdf8e81fc8bb34d29fe73f0a358 in 598ms, sequenceid=284, compaction requested=true 2024-11-11T12:43:20,337 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:20,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:20,337 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:20,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:20,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:20,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:20,338 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 102739 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37183 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:20,338 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:20,338 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,339 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,339 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=100.3 K 2024-11-11T12:43:20,339 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,339 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/43b48fedfae54b04913cf5c4144b694d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=36.3 K 2024-11-11T12:43:20,339 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d] 2024-11-11T12:43:20,339 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8726f5b3f6d4fedbefb2578be663e8e, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:20,339 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 43b48fedfae54b04913cf5c4144b694d, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:20,340 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8be456b3f3124fe7a88727915bf086a3, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731328998111 2024-11-11T12:43:20,340 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80465fde17d74958be3749c1a66af50c, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731328998111 2024-11-11T12:43:20,340 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8a92042a0ddb4485b05d357f4d65e72d, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1731328999082 2024-11-11T12:43:20,340 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ae79f5b983874dc3a01aeb0b51509047, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1731328999082 2024-11-11T12:43:20,362 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,363 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#355 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:20,365 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bec61211c65946e38a3addc193d4178c is 50, key is test_row_0/B:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:20,366 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111e76c98d5ffb24ad08fd3f8f94a6f8fd1_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,368 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111e76c98d5ffb24ad08fd3f8f94a6f8fd1_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,368 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e76c98d5ffb24ad08fd3f8f94a6f8fd1_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742251_1427 (size=4469) 2024-11-11T12:43:20,389 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#356 average throughput is 0.90 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:20,390 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/6f005f383fdd4629adfae50a9d4657e5 is 175, key is test_row_0/A:col10/1731328999100/Put/seqid=0 2024-11-11T12:43:20,396 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742252_1428 (size=31937) 2024-11-11T12:43:20,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742250_1426 (size=12983) 2024-11-11T12:43:20,403 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:20,403 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,403 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:20,404 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411113e851205cdcc484aaaac1d6c5e479bf8_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329000401/Put/seqid=0 2024-11-11T12:43:20,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-11T12:43:20,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742253_1429 (size=14994) 2024-11-11T12:43:20,448 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:20,454 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411113e851205cdcc484aaaac1d6c5e479bf8_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113e851205cdcc484aaaac1d6c5e479bf8_99827bdf8e81fc8bb34d29fe73f0a358 
2024-11-11T12:43:20,455 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/78d89f44015642d4a3cb54c113167c17, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,456 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/78d89f44015642d4a3cb54c113167c17 is 175, key is test_row_0/A:col10/1731329000401/Put/seqid=0 2024-11-11T12:43:20,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742254_1430 (size=39949) 2024-11-11T12:43:20,462 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=295, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/78d89f44015642d4a3cb54c113167c17 2024-11-11T12:43:20,473 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,474 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115 2024-11-11T12:43:20,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:20,474 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,475 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] handler.RSProcedureHandler(58): pid=115 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:20,475 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=115 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:20,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=115 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:20,490 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ed9d930cb20f44c1b9521d7c448e3190 is 50, key is test_row_0/B:col10/1731329000401/Put/seqid=0 2024-11-11T12:43:20,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742255_1431 (size=12301) 2024-11-11T12:43:20,499 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ed9d930cb20f44c1b9521d7c448e3190 2024-11-11T12:43:20,531 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/b05b864d7ed84dfba21e2419cec3a727 is 50, key is test_row_0/C:col10/1731329000401/Put/seqid=0 2024-11-11T12:43:20,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742256_1432 (size=12301) 2024-11-11T12:43:20,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=295 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/b05b864d7ed84dfba21e2419cec3a727 2024-11-11T12:43:20,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:43:20,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 203 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329060535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:43:20,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/78d89f44015642d4a3cb54c113167c17 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17
2024-11-11T12:43:20,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17, entries=200, sequenceid=295, filesize=39.0 K
2024-11-11T12:43:20,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/ed9d930cb20f44c1b9521d7c448e3190 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190
2024-11-11T12:43:20,572 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190, entries=150, sequenceid=295, filesize=12.0 K
2024-11-11T12:43:20,574 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/b05b864d7ed84dfba21e2419cec3a727 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727
2024-11-11T12:43:20,583 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727, entries=150, sequenceid=295, filesize=12.0 K
2024-11-11T12:43:20,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for 99827bdf8e81fc8bb34d29fe73f0a358 in 182ms, sequenceid=295, compaction requested=true
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358:
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 3
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 3
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3
2024-11-11T12:43:20,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0
2024-11-11T12:43:20,627 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232
2024-11-11T12:43:20,629 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114
2024-11-11T12:43:20,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=115
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.
2024-11-11T12:43:20,630 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C
2024-11-11T12:43:20,630 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:20,641 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119ad013b9e98a4319b241e97cdc0d5785_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329000511/Put/seqid=0
2024-11-11T12:43:20,653 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358
2024-11-11T12:43:20,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing
2024-11-11T12:43:20,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742257_1433 (size=12454)
2024-11-11T12:43:20,662 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T12:43:20,669 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119ad013b9e98a4319b241e97cdc0d5785_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119ad013b9e98a4319b241e97cdc0d5785_99827bdf8e81fc8bb34d29fe73f0a358
2024-11-11T12:43:20,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/a61cc9dc0d0a4b30b13704aeec2df4fb, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358]
2024-11-11T12:43:20,670 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/a61cc9dc0d0a4b30b13704aeec2df4fb is 175, key is test_row_0/A:col10/1731329000511/Put/seqid=0
2024-11-11T12:43:20,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742258_1434 (size=31255)
2024-11-11T12:43:20,675 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=320, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/a61cc9dc0d0a4b30b13704aeec2df4fb
2024-11-11T12:43:20,691 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/cf6225490fef4dd59d704995f5fa44fa is 50, key is test_row_0/B:col10/1731329000511/Put/seqid=0
2024-11-11T12:43:20,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742259_1435 (size=12301)
2024-11-11T12:43:20,702 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=320 (bloomFilter=true),
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/cf6225490fef4dd59d704995f5fa44fa 2024-11-11T12:43:20,707 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/1deb957874d742b18fbf37134a939af7 is 50, key is test_row_0/C:col10/1731329000511/Put/seqid=0 2024-11-11T12:43:20,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742260_1436 (size=12301) 2024-11-11T12:43:20,713 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=320 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/1deb957874d742b18fbf37134a939af7 2024-11-11T12:43:20,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/a61cc9dc0d0a4b30b13704aeec2df4fb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb 2024-11-11T12:43:20,720 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb, entries=150, sequenceid=320, filesize=30.5 K 2024-11-11T12:43:20,720 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/cf6225490fef4dd59d704995f5fa44fa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa 2024-11-11T12:43:20,724 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa, entries=150, sequenceid=320, filesize=12.0 K 2024-11-11T12:43:20,725 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/1deb957874d742b18fbf37134a939af7 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7 2024-11-11T12:43:20,727 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7, entries=150, sequenceid=320, filesize=12.0 K 2024-11-11T12:43:20,728 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for 99827bdf8e81fc8bb34d29fe73f0a358 in 98ms, sequenceid=320, compaction requested=true 2024-11-11T12:43:20,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:20,728 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,729 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=115}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=115 2024-11-11T12:43:20,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=115 2024-11-11T12:43:20,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:20,731 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:20,732 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:20,734 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=115, resume processing ppid=114 2024-11-11T12:43:20,734 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, ppid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 412 msec 2024-11-11T12:43:20,736 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=114, table=TestAcidGuarantees in 429 msec 2024-11-11T12:43:20,761 
DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c94c38f4c1f94a26aacc977dbf3bd21e_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329000721/Put/seqid=0 2024-11-11T12:43:20,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742261_1437 (size=14994) 2024-11-11T12:43:20,806 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/6f005f383fdd4629adfae50a9d4657e5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5 2024-11-11T12:43:20,810 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bec61211c65946e38a3addc193d4178c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bec61211c65946e38a3addc193d4178c 2024-11-11T12:43:20,811 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into 6f005f383fdd4629adfae50a9d4657e5(size=31.2 K), total size for store is 100.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:20,811 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:20,811 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731329000337; duration=0sec 2024-11-11T12:43:20,811 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-11T12:43:20,811 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:20,811 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:20,811 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-11T12:43:20,814 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61751 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-11T12:43:20,814 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:20,814 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:20,814 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/590e2f8a81e64058b4d98ca64b32008e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=60.3 K 2024-11-11T12:43:20,814 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 590e2f8a81e64058b4d98ca64b32008e, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=244, earliestPutTs=1731328996973 2024-11-11T12:43:20,814 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 09bce5af97354beca8649b8fd1b23eec, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731328998111 2024-11-11T12:43:20,816 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 657988a4c4d54b4f956aeeac925379cd, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1731328999082 2024-11-11T12:43:20,817 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into bec61211c65946e38a3addc193d4178c(size=12.7 K), total size for store is 36.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:20,817 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731329000337; duration=0sec 2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:20,817 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b05b864d7ed84dfba21e2419cec3a727, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731328999769 2024-11-11T12:43:20,818 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 103141 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:20,818 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:20,818 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:20,818 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=100.7 K 2024-11-11T12:43:20,819 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:20,819 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb] 2024-11-11T12:43:20,819 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1deb957874d742b18fbf37134a939af7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1731329000511 2024-11-11T12:43:20,819 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6f005f383fdd4629adfae50a9d4657e5, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1731328999082 2024-11-11T12:43:20,820 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 78d89f44015642d4a3cb54c113167c17, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731328999768 2024-11-11T12:43:20,821 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a61cc9dc0d0a4b30b13704aeec2df4fb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1731329000511 2024-11-11T12:43:20,830 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,840 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111125151ea2fc404f4292a7df139269e667_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,846 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111125151ea2fc404f4292a7df139269e667_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,846 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111125151ea2fc404f4292a7df139269e667_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:20,847 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#365 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:20,847 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/2d0ea1b82ef14297bee859667293e160 is 50, key is test_row_0/C:col10/1731329000511/Put/seqid=0 2024-11-11T12:43:20,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742262_1438 (size=4469) 2024-11-11T12:43:20,876 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:20,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 235 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329060871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:20,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742263_1439 (size=13017) 2024-11-11T12:43:20,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=114 2024-11-11T12:43:20,930 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 114 completed 2024-11-11T12:43:20,931 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:20,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees 2024-11-11T12:43:20,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:20,933 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:20,933 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=116, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:20,933 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=117, ppid=116, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:20,984 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:20,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 237 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329060977, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:21,085 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,091 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-11T12:43:21,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:21,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:21,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:21,091 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:21,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:21,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:21,175 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:21,183 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111c94c38f4c1f94a26aacc977dbf3bd21e_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c94c38f4c1f94a26aacc977dbf3bd21e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:21,188 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/46e3f9cf727c4050984484e1b891637e, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:21,189 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/46e3f9cf727c4050984484e1b891637e is 175, key is test_row_0/A:col10/1731329000721/Put/seqid=0 2024-11-11T12:43:21,192 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:21,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 239 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329061185, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742264_1440 (size=39949) 2024-11-11T12:43:21,204 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=332, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/46e3f9cf727c4050984484e1b891637e 2024-11-11T12:43:21,225 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/f84dcadc79dd49aaa0ccb4f8f7f03370 is 50, key is test_row_0/B:col10/1731329000721/Put/seqid=0 2024-11-11T12:43:21,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742265_1441 (size=12301) 2024-11-11T12:43:21,232 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/f84dcadc79dd49aaa0ccb4f8f7f03370 2024-11-11T12:43:21,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:21,244 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-11T12:43:21,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:21,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
as already flushing 2024-11-11T12:43:21,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:21,245 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] handler.RSProcedureHandler(58): pid=117 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:21,245 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=117 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:21,245 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=117 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:21,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/9c22ddb5a88e409f86b4bc7b002ca5fa is 50, key is test_row_0/C:col10/1731329000721/Put/seqid=0 2024-11-11T12:43:21,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742266_1442 (size=12301) 2024-11-11T12:43:21,262 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=332 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/9c22ddb5a88e409f86b4bc7b002ca5fa 2024-11-11T12:43:21,266 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#364 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:21,267 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/fb958e804f91420cb1c32846a9d68874 is 175, key is test_row_0/A:col10/1731329000511/Put/seqid=0 2024-11-11T12:43:21,281 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/46e3f9cf727c4050984484e1b891637e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e 2024-11-11T12:43:21,290 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e, entries=200, sequenceid=332, filesize=39.0 K 2024-11-11T12:43:21,304 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/f84dcadc79dd49aaa0ccb4f8f7f03370 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370 2024-11-11T12:43:21,305 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/2d0ea1b82ef14297bee859667293e160 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/2d0ea1b82ef14297bee859667293e160 2024-11-11T12:43:21,312 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370, entries=150, sequenceid=332, filesize=12.0 K 2024-11-11T12:43:21,312 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/9c22ddb5a88e409f86b4bc7b002ca5fa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa 2024-11-11T12:43:21,314 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into 2d0ea1b82ef14297bee859667293e160(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:21,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:21,314 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=11, startTime=1731329000585; duration=0sec 2024-11-11T12:43:21,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:21,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:21,314 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:21,316 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49886 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:21,316 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:21,316 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:21,316 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bec61211c65946e38a3addc193d4178c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=48.7 K 2024-11-11T12:43:21,317 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting bec61211c65946e38a3addc193d4178c, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=284, earliestPutTs=1731328999082 2024-11-11T12:43:21,317 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed9d930cb20f44c1b9521d7c448e3190, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=295, earliestPutTs=1731328999769 2024-11-11T12:43:21,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742267_1443 (size=32039) 2024-11-11T12:43:21,320 
DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting cf6225490fef4dd59d704995f5fa44fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1731329000511 2024-11-11T12:43:21,320 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa, entries=150, sequenceid=332, filesize=12.0 K 2024-11-11T12:43:21,321 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting f84dcadc79dd49aaa0ccb4f8f7f03370, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731329000681 2024-11-11T12:43:21,322 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for 99827bdf8e81fc8bb34d29fe73f0a358 in 591ms, sequenceid=332, compaction requested=true 2024-11-11T12:43:21,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:21,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:21,322 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:21,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:21,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-11T12:43:21,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:21,323 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-11T12:43:21,332 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#368 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:21,333 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/da98e255f64d4728a8593b01ae0da76f is 50, key is test_row_0/B:col10/1731329000721/Put/seqid=0 2024-11-11T12:43:21,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742268_1444 (size=13119) 2024-11-11T12:43:21,380 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/da98e255f64d4728a8593b01ae0da76f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/da98e255f64d4728a8593b01ae0da76f 2024-11-11T12:43:21,398 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,403 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into da98e255f64d4728a8593b01ae0da76f(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:21,403 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:21,403 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=12, startTime=1731329000585; duration=0sec 2024-11-11T12:43:21,403 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-11T12:43:21,403 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:21,403 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-11T12:43:21,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=117 2024-11-11T12:43:21,404 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:21,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:43:21,404 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:43:21,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:43:21,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. because compaction request was cancelled 2024-11-11T12:43:21,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:21,404 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:21,405 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:21,405 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:21,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:21,406 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:43:21,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 
2024-11-11T12:43:21,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. because compaction request was cancelled 2024-11-11T12:43:21,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:21,407 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:43:21,418 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:43:21,418 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:43:21,418 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. because compaction request was cancelled 2024-11-11T12:43:21,418 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:21,418 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-11T12:43:21,419 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:43:21,419 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:43:21,419 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
because compaction request was cancelled 2024-11-11T12:43:21,419 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:21,436 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111bcac93fa22484b9aaa72a4e7e2906704_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329000833/Put/seqid=0 2024-11-11T12:43:21,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742269_1445 (size=12454) 2024-11-11T12:43:21,489 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:21,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:21,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:21,510 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111bcac93fa22484b9aaa72a4e7e2906704_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111bcac93fa22484b9aaa72a4e7e2906704_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:21,510 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c49c02d33d644eef89095d5251e33557, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:21,511 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c49c02d33d644eef89095d5251e33557 is 175, key is test_row_0/A:col10/1731329000833/Put/seqid=0 2024-11-11T12:43:21,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742270_1446 (size=31255) 2024-11-11T12:43:21,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:21,549 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:21,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 250 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329061543, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,656 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:21,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 252 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329061653, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,723 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/fb958e804f91420cb1c32846a9d68874 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874 2024-11-11T12:43:21,727 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into fb958e804f91420cb1c32846a9d68874(size=31.3 K), total size for store is 70.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:21,727 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:21,728 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731329000585; duration=0sec 2024-11-11T12:43:21,728 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:21,728 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:21,859 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:21,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 254 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329061857, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:21,930 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=360, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c49c02d33d644eef89095d5251e33557 2024-11-11T12:43:21,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/50e0868a5e16447daf0654ae2b398d35 is 50, key is test_row_0/B:col10/1731329000833/Put/seqid=0 2024-11-11T12:43:21,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742271_1447 (size=12301) 2024-11-11T12:43:21,968 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/50e0868a5e16447daf0654ae2b398d35 2024-11-11T12:43:21,982 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8e8973b542c6439a8d1072c13e2ac27f is 50, key is test_row_0/C:col10/1731329000833/Put/seqid=0 2024-11-11T12:43:21,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742272_1448 (size=12301) 2024-11-11T12:43:21,992 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=360 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8e8973b542c6439a8d1072c13e2ac27f 
2024-11-11T12:43:22,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c49c02d33d644eef89095d5251e33557 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557 2024-11-11T12:43:22,018 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557, entries=150, sequenceid=360, filesize=30.5 K 2024-11-11T12:43:22,019 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/50e0868a5e16447daf0654ae2b398d35 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35 2024-11-11T12:43:22,024 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35, entries=150, sequenceid=360, filesize=12.0 K 2024-11-11T12:43:22,025 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/8e8973b542c6439a8d1072c13e2ac27f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f 2024-11-11T12:43:22,029 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f, entries=150, sequenceid=360, filesize=12.0 K 2024-11-11T12:43:22,030 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for 99827bdf8e81fc8bb34d29fe73f0a358 in 626ms, sequenceid=360, compaction requested=true 2024-11-11T12:43:22,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:22,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.FlushRegionCallable(64): Closing region 
operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:22,030 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=117}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=117 2024-11-11T12:43:22,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=117 2024-11-11T12:43:22,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:22,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=116 2024-11-11T12:43:22,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.1050 sec 2024-11-11T12:43:22,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=116, table=TestAcidGuarantees in 1.1180 sec 2024-11-11T12:43:22,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:22,173 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:22,173 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:22,190 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411118d2fea575cf94ea9b3f9aef30fff4353_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:22,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742273_1449 (size=14994) 2024-11-11T12:43:22,318 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 275 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329062315, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,323 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60908 deadline: 1731329062320, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,328 DEBUG [Thread-1568 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4184 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:22,329 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329062324, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 277 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329062432, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,436 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329062434, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,616 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:22,625 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411118d2fea575cf94ea9b3f9aef30fff4353_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118d2fea575cf94ea9b3f9aef30fff4353_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:22,626 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/101610660be54680b09e6e5726790334, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:22,627 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/101610660be54680b09e6e5726790334 is 175, key is test_row_0/A:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:22,643 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 279 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329062637, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,644 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329062643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742274_1450 (size=39949) 2024-11-11T12:43:22,821 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60894 deadline: 1731329062814, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,822 DEBUG [Thread-1572 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18181 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_0' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:22,848 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60954 deadline: 1731329062836, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,850 DEBUG [Thread-1564 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=8, retries=16, started=18208 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at 
org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:22,952 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329062952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:22,956 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:22,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 281 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329062952, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=116 2024-11-11T12:43:23,038 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 116 completed 2024-11-11T12:43:23,048 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:23,052 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=374, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/101610660be54680b09e6e5726790334 2024-11-11T12:43:23,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees 2024-11-11T12:43:23,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:23,060 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:23,064 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=118, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:23,064 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=118, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:23,066 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/9f8427eef6f540a5a1f30d490a6bd869 is 50, key is test_row_0/B:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:23,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742275_1451 
(size=12301) 2024-11-11T12:43:23,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:23,227 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,227 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:23,227 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,227 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,228 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:23,379 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,380 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:23,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:23,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,380 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,380 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:23,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,463 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:23,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329063460, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:23,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 283 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329063461, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,496 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/9f8427eef6f540a5a1f30d490a6bd869 2024-11-11T12:43:23,506 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/789485d2fc494b5ebe3607411fd3ae72 is 50, key is test_row_0/C:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:23,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742276_1452 (size=12301) 2024-11-11T12:43:23,534 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:23,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,535 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,535 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:23,687 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,689 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:23,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,690 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,690 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,841 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,841 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:23,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,842 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] handler.RSProcedureHandler(58): pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=119 java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=119 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:23,921 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=374 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/789485d2fc494b5ebe3607411fd3ae72 2024-11-11T12:43:23,926 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/101610660be54680b09e6e5726790334 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334 2024-11-11T12:43:23,931 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334, entries=200, sequenceid=374, filesize=39.0 K 2024-11-11T12:43:23,932 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/9f8427eef6f540a5a1f30d490a6bd869 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869 2024-11-11T12:43:23,937 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869, entries=150, 
sequenceid=374, filesize=12.0 K 2024-11-11T12:43:23,938 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/789485d2fc494b5ebe3607411fd3ae72 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72 2024-11-11T12:43:23,949 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72, entries=150, sequenceid=374, filesize=12.0 K 2024-11-11T12:43:23,950 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1777ms, sequenceid=374, compaction requested=true 2024-11-11T12:43:23,950 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:23,950 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:23,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:23,950 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:23,950 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:23,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:23,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:23,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:23,951 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:23,960 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:23,960 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:23,960 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in 
TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,960 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/da98e255f64d4728a8593b01ae0da76f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=36.8 K 2024-11-11T12:43:23,961 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 143192 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:23,961 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:23,961 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,961 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=139.8 K 2024-11-11T12:43:23,961 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:23,961 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334] 2024-11-11T12:43:23,961 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting da98e255f64d4728a8593b01ae0da76f, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731329000681 2024-11-11T12:43:23,962 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting fb958e804f91420cb1c32846a9d68874, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1731329000511 2024-11-11T12:43:23,968 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 50e0868a5e16447daf0654ae2b398d35, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1731329000833 2024-11-11T12:43:23,972 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9f8427eef6f540a5a1f30d490a6bd869, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:23,973 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 46e3f9cf727c4050984484e1b891637e, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731329000681 2024-11-11T12:43:23,976 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting c49c02d33d644eef89095d5251e33557, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1731329000833 2024-11-11T12:43:23,980 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 101610660be54680b09e6e5726790334, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:23,994 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:23,996 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=119 2024-11-11T12:43:23,996 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:23,996 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:23,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:24,027 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#375 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:24,027 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/45f06f59a88a4085b32c17f7e2c941b7 is 50, key is test_row_0/B:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:24,034 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111199ead5202df24a32acae03d77fac5107_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329002302/Put/seqid=0 2024-11-11T12:43:24,035 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:24,048 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111a8b491949c744acd8179fcd2e7e6bbb0_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:24,051 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111a8b491949c744acd8179fcd2e7e6bbb0_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:24,051 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a8b491949c744acd8179fcd2e7e6bbb0_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:24,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742277_1453 (size=13221) 2024-11-11T12:43:24,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742278_1454 (size=12454) 2024-11-11T12:43:24,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:24,089 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111199ead5202df24a32acae03d77fac5107_99827bdf8e81fc8bb34d29fe73f0a358 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111199ead5202df24a32acae03d77fac5107_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:24,090 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/535c00e95db4495b94d61061b027f84b, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:24,091 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/535c00e95db4495b94d61061b027f84b is 175, key is test_row_0/A:col10/1731329002302/Put/seqid=0 2024-11-11T12:43:24,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742279_1455 (size=4469) 2024-11-11T12:43:24,097 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#377 average throughput is 0.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:24,098 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/3e44fb2847cd430f894f7afeaee43da4 is 175, key is test_row_0/A:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:24,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742280_1456 (size=31255) 2024-11-11T12:43:24,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742281_1457 (size=32175) 2024-11-11T12:43:24,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:24,478 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. as already flushing 2024-11-11T12:43:24,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:24,519 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,519 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 290 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329064513, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:24,526 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,526 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329064519, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:24,528 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/45f06f59a88a4085b32c17f7e2c941b7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/45f06f59a88a4085b32c17f7e2c941b7 2024-11-11T12:43:24,540 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=397, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/535c00e95db4495b94d61061b027f84b 2024-11-11T12:43:24,575 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into 45f06f59a88a4085b32c17f7e2c941b7(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:24,575 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:24,575 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731329003950; duration=0sec 2024-11-11T12:43:24,575 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:24,575 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:24,576 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:24,583 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/3e44fb2847cd430f894f7afeaee43da4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4 2024-11-11T12:43:24,585 DEBUG [Thread-1581 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x37ec8e3b to 127.0.0.1:54294 2024-11-11T12:43:24,585 DEBUG [Thread-1581 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:24,586 DEBUG [Thread-1583 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x787e5169 to 127.0.0.1:54294 2024-11-11T12:43:24,587 DEBUG [Thread-1583 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:24,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/155e7887a9b949ed87659ab862ed30fb is 50, key is test_row_0/B:col10/1731329002302/Put/seqid=0 2024-11-11T12:43:24,589 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49920 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:24,589 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:24,589 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
2024-11-11T12:43:24,589 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/2d0ea1b82ef14297bee859667293e160, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=48.8 K 2024-11-11T12:43:24,589 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 2d0ea1b82ef14297bee859667293e160, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=320, earliestPutTs=1731329000511 2024-11-11T12:43:24,590 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9c22ddb5a88e409f86b4bc7b002ca5fa, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=332, earliestPutTs=1731329000681 2024-11-11T12:43:24,590 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8e8973b542c6439a8d1072c13e2ac27f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=360, earliestPutTs=1731329000833 2024-11-11T12:43:24,590 DEBUG [Thread-1575 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e247aa1 to 127.0.0.1:54294 2024-11-11T12:43:24,590 DEBUG [Thread-1575 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:24,591 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 789485d2fc494b5ebe3607411fd3ae72, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:24,597 DEBUG [Thread-1577 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2205f666 to 127.0.0.1:54294 2024-11-11T12:43:24,597 DEBUG [Thread-1579 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6584e9ce to 127.0.0.1:54294 2024-11-11T12:43:24,597 DEBUG [Thread-1577 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:24,598 DEBUG [Thread-1579 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:24,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742282_1458 (size=12301) 2024-11-11T12:43:24,604 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into 3e44fb2847cd430f894f7afeaee43da4(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:24,604 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:24,604 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=12, startTime=1731329003950; duration=0sec 2024-11-11T12:43:24,605 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:24,605 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:24,606 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/155e7887a9b949ed87659ab862ed30fb 2024-11-11T12:43:24,613 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#379 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:24,613 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/e7ca2f350d8e40b5b8dce30e6d1cea0e is 50, key is test_row_0/C:col10/1731329001527/Put/seqid=0 2024-11-11T12:43:24,619 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/bde5d6ad344f49b2b623e6e91c1f4ca9 is 50, key is test_row_0/C:col10/1731329002302/Put/seqid=0 2024-11-11T12:43:24,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742283_1459 (size=13153) 2024-11-11T12:43:24,623 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 292 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329064623, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:24,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742284_1460 (size=12301) 2024-11-11T12:43:24,628 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329064628, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:24,636 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/e7ca2f350d8e40b5b8dce30e6d1cea0e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/e7ca2f350d8e40b5b8dce30e6d1cea0e 2024-11-11T12:43:24,654 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into e7ca2f350d8e40b5b8dce30e6d1cea0e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:24,654 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:24,654 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=12, startTime=1731329003951; duration=0sec 2024-11-11T12:43:24,654 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:24,654 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:24,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 294 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60964 deadline: 1731329064830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:24,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:24,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:60934 deadline: 1731329064830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:25,033 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=397 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/bde5d6ad344f49b2b623e6e91c1f4ca9 2024-11-11T12:43:25,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/535c00e95db4495b94d61061b027f84b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b 2024-11-11T12:43:25,041 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b, entries=150, sequenceid=397, filesize=30.5 K 2024-11-11T12:43:25,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/155e7887a9b949ed87659ab862ed30fb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb 2024-11-11T12:43:25,046 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb, entries=150, sequenceid=397, filesize=12.0 K 2024-11-11T12:43:25,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/bde5d6ad344f49b2b623e6e91c1f4ca9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9 2024-11-11T12:43:25,051 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9, entries=150, sequenceid=397, filesize=12.0 K 2024-11-11T12:43:25,052 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1056ms, sequenceid=397, compaction requested=false 2024-11-11T12:43:25,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:25,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:25,052 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=119}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=119 2024-11-11T12:43:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=119 2024-11-11T12:43:25,054 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=118 2024-11-11T12:43:25,055 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.9890 sec 2024-11-11T12:43:25,056 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=118, table=TestAcidGuarantees in 2.0060 sec 2024-11-11T12:43:25,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:43:25,135 DEBUG [Thread-1570 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09f472e0 to 127.0.0.1:54294 2024-11-11T12:43:25,135 DEBUG [Thread-1570 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:25,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:25,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:25,135 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:25,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:25,136 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:25,136 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:25,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:25,136 DEBUG [Thread-1566 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x72f422b4 to 127.0.0.1:54294 2024-11-11T12:43:25,136 DEBUG [Thread-1566 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:25,143 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111120212ee4849b48179e8061a9786dbb70_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742285_1461 (size=12454) 2024-11-11T12:43:25,152 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:25,157 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111120212ee4849b48179e8061a9786dbb70_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111120212ee4849b48179e8061a9786dbb70_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:25,158 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8223453147204462ab37e78dc3bc29b1, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:25,159 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8223453147204462ab37e78dc3bc29b1 is 175, key is test_row_0/A:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,172 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=118 2024-11-11T12:43:25,172 INFO [Thread-1574 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 118 completed 2024-11-11T12:43:25,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742286_1462 (size=31255) 2024-11-11T12:43:25,190 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=415, memsize=26.8 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8223453147204462ab37e78dc3bc29b1 2024-11-11T12:43:25,205 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bfa169967cdd4d3cadfa09ad0410b342 is 50, key is test_row_0/B:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742287_1463 (size=12301) 2024-11-11T12:43:25,239 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bfa169967cdd4d3cadfa09ad0410b342 2024-11-11T12:43:25,250 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/ce951134150543b68aca62744b1b8fe7 is 50, key is test_row_0/C:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742288_1464 (size=12301) 2024-11-11T12:43:25,669 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=415 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/ce951134150543b68aca62744b1b8fe7 2024-11-11T12:43:25,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/8223453147204462ab37e78dc3bc29b1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1 2024-11-11T12:43:25,678 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1, entries=150, sequenceid=415, filesize=30.5 K 2024-11-11T12:43:25,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/bfa169967cdd4d3cadfa09ad0410b342 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342 2024-11-11T12:43:25,682 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342, entries=150, 
sequenceid=415, filesize=12.0 K 2024-11-11T12:43:25,683 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/ce951134150543b68aca62744b1b8fe7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7 2024-11-11T12:43:25,689 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7, entries=150, sequenceid=415, filesize=12.0 K 2024-11-11T12:43:25,690 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=0 B/0 for 99827bdf8e81fc8bb34d29fe73f0a358 in 555ms, sequenceid=415, compaction requested=true 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:25,690 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 99827bdf8e81fc8bb34d29fe73f0a358:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:25,690 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:25,690 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:25,691 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94685 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:25,691 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/A is initiating minor compaction (all files) 2024-11-11T12:43:25,691 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/A in 
TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:25,691 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=92.5 K 2024-11-11T12:43:25,691 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:25,691 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1] 2024-11-11T12:43:25,692 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37823 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:25,692 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e44fb2847cd430f894f7afeaee43da4, keycount=150, bloomtype=ROW, size=31.4 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:25,692 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/B is initiating minor compaction (all files) 2024-11-11T12:43:25,692 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/B in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
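The RegionTooBusyException warnings logged at 12:43:24 above ("Over memstore limit=512.0 K") mean the region's memstore grew past its blocking size, which, as far as I can tell, is the configured flush size multiplied by hbase.hregion.memstore.block.multiplier; the server rejects the Mutate call and the standard client keeps retrying it until the call deadline shown in the CallRunner lines. A caller that wants to back off explicitly can do something like the sketch below; the table, row key, family and qualifier come from the log, while the value and the retry policy are made up.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutWithBackoff {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          Put put = new Put(Bytes.toBytes("test_row_0"))             // row key seen in the log
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), // family/qualifier from the log
                  Bytes.toBytes("some-value"));                      // hypothetical value
          for (int attempt = 1; attempt <= 5; attempt++) {
            try {
              table.put(put);
              break; // write accepted
            } catch (IOException busy) {
              // The busy condition usually surfaces as a RegionTooBusyException, possibly
              // wrapped by the client's own retry machinery, so catch broadly and give
              // the memstore time to flush before trying again.
              Thread.sleep(1000L * attempt);
            }
          }
        }
      }
    }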
2024-11-11T12:43:25,692 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/45f06f59a88a4085b32c17f7e2c941b7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=36.9 K 2024-11-11T12:43:25,692 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 535c00e95db4495b94d61061b027f84b, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1731329002196 2024-11-11T12:43:25,693 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 45f06f59a88a4085b32c17f7e2c941b7, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:25,693 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8223453147204462ab37e78dc3bc29b1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1731329004517 2024-11-11T12:43:25,693 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 155e7887a9b949ed87659ab862ed30fb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1731329002196 2024-11-11T12:43:25,694 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bfa169967cdd4d3cadfa09ad0410b342, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1731329004517 2024-11-11T12:43:25,709 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:25,710 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#B#compaction#384 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:25,711 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/83db3a29e8c440b491aafb754c2fc368 is 50, key is test_row_0/B:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,712 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411118229cc7b53054927b8b56e3777f74b52_99827bdf8e81fc8bb34d29fe73f0a358 store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:25,741 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411118229cc7b53054927b8b56e3777f74b52_99827bdf8e81fc8bb34d29fe73f0a358, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:25,742 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411118229cc7b53054927b8b56e3777f74b52_99827bdf8e81fc8bb34d29fe73f0a358 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:25,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742289_1465 (size=13323) 2024-11-11T12:43:25,778 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742290_1466 (size=4469) 2024-11-11T12:43:25,782 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#A#compaction#385 average throughput is 0.33 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:25,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c30c0d2a1de948ddacb2cd346300c86e is 175, key is test_row_0/A:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,783 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/83db3a29e8c440b491aafb754c2fc368 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/83db3a29e8c440b491aafb754c2fc368 2024-11-11T12:43:25,788 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/B of 99827bdf8e81fc8bb34d29fe73f0a358 into 83db3a29e8c440b491aafb754c2fc368(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
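The mobdir/.tmp paths and the DefaultMobStoreFlusher/DefaultMobStoreCompactor entries around here indicate that family A of this test table is MOB-enabled, so oversized values are flushed and compacted into separate MOB files (and the MOB writer above is aborted when a compaction produces no MOB cells). Below is a hedged sketch of how such a family is declared at table-creation time; the table name and the 100 KB threshold are illustrative, not taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTable {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Values larger than the MOB threshold in family "A" are written to separate
          // MOB files, which is why the log shows flushes and compactions under mobdir/.
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("SomeMobTable"))  // hypothetical table name
              .setColumnFamily(ColumnFamilyDescriptorBuilder
                  .newBuilder(Bytes.toBytes("A"))
                  .setMobEnabled(true)
                  .setMobThreshold(100 * 1024L)               // illustrative: 100 KB
                  .build())
              .build());
        }
      }
    }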
2024-11-11T12:43:25,788 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:25,788 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/B, priority=13, startTime=1731329005690; duration=0sec 2024-11-11T12:43:25,788 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:25,788 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:B 2024-11-11T12:43:25,788 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:25,789 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37755 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:25,789 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): 99827bdf8e81fc8bb34d29fe73f0a358/C is initiating minor compaction (all files) 2024-11-11T12:43:25,789 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of 99827bdf8e81fc8bb34d29fe73f0a358/C in TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:25,789 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/e7ca2f350d8e40b5b8dce30e6d1cea0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp, totalSize=36.9 K 2024-11-11T12:43:25,789 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e7ca2f350d8e40b5b8dce30e6d1cea0e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=374, earliestPutTs=1731329001527 2024-11-11T12:43:25,789 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bde5d6ad344f49b2b623e6e91c1f4ca9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=397, earliestPutTs=1731329002196 2024-11-11T12:43:25,790 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ce951134150543b68aca62744b1b8fe7, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=415, earliestPutTs=1731329004517 2024-11-11T12:43:25,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 
is added to blk_1073742291_1467 (size=32277) 2024-11-11T12:43:25,808 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 99827bdf8e81fc8bb34d29fe73f0a358#C#compaction#386 average throughput is 1.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:25,809 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/1d6a918cbc5b48bb87a6e9f9decadd72 is 50, key is test_row_0/C:col10/1731329005133/Put/seqid=0 2024-11-11T12:43:25,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742292_1468 (size=13255) 2024-11-11T12:43:25,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/1d6a918cbc5b48bb87a6e9f9decadd72 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1d6a918cbc5b48bb87a6e9f9decadd72 2024-11-11T12:43:25,859 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/C of 99827bdf8e81fc8bb34d29fe73f0a358 into 1d6a918cbc5b48bb87a6e9f9decadd72(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:25,859 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:25,859 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/C, priority=13, startTime=1731329005690; duration=0sec 2024-11-11T12:43:25,859 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:25,859 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:C 2024-11-11T12:43:26,199 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/c30c0d2a1de948ddacb2cd346300c86e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c30c0d2a1de948ddacb2cd346300c86e 2024-11-11T12:43:26,204 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 99827bdf8e81fc8bb34d29fe73f0a358/A of 99827bdf8e81fc8bb34d29fe73f0a358 into c30c0d2a1de948ddacb2cd346300c86e(size=31.5 K), total size for store is 31.5 K. This selection was in queue for 0sec, and took 0sec to execute. 
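The recurring "Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking" lines come from the exploring compaction policy, and the numbers it reports are governed by a handful of store-level settings. The sketch below lists those keys with what I believe are their usual defaults (the 16 matches the blocking figure above); treat the values as assumptions to verify for your version, and note that these are region-server settings that normally live in hbase-site.xml, shown here in Configuration form only for concreteness.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fewest / most store files a single minor compaction may include.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // A file stays eligible while it is no bigger than ratio * sum(smaller files).
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        // Writes to a store with this many files are delayed until compaction catches up
        // (the "16 blocking" figure in the selection lines above).
        conf.setInt("hbase.hstore.blockingStoreFiles", 16);
        // Pool sizes behind the "shortCompactions" and "longCompactions" threads in the log.
        conf.setInt("hbase.regionserver.thread.compaction.small", 1);
        conf.setInt("hbase.regionserver.thread.compaction.large", 1);
        System.out.println("blocking store files = "
            + conf.getInt("hbase.hstore.blockingStoreFiles", -1));
      }
    }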
2024-11-11T12:43:26,204 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:26,204 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358., storeName=99827bdf8e81fc8bb34d29fe73f0a358/A, priority=13, startTime=1731329005690; duration=0sec 2024-11-11T12:43:26,204 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:26,204 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 99827bdf8e81fc8bb34d29fe73f0a358:A 2024-11-11T12:43:26,343 DEBUG [Thread-1568 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2df33cdf to 127.0.0.1:54294 2024-11-11T12:43:26,343 DEBUG [Thread-1568 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:32,504 ERROR [LeaseRenewer:jenkins@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:42421,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:32,874 DEBUG [Thread-1572 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x167a78b0 to 127.0.0.1:54294 2024-11-11T12:43:32,874 DEBUG [Thread-1572 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:32,938 DEBUG [Thread-1564 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3637e4c6 to 127.0.0.1:54294 2024-11-11T12:43:32,938 DEBUG [Thread-1564 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. 
Writers:
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 6
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 82
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 48
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 181
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 9
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1760
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5279 rows
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1745
2024-11-11T12:43:32,938 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5235 rows
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1771
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5313 rows
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1731
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5193 rows
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1762
2024-11-11T12:43:32,939 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5285 rows
2024-11-11T12:43:32,939 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-11T12:43:32,939 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1df61dc9 to 127.0.0.1:54294
2024-11-11T12:43:32,939 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:43:32,941 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-11T12:43:32,941 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-11T12:43:32,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=120, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-11T12:43:32,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120
2024-11-11T12:43:32,945 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329012944"}]},"ts":"1731329012944"}
2024-11-11T12:43:32,945 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-11T12:43:32,949 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-11T12:43:32,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-11T12:43:32,951 INFO [PEWorker-4 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, UNASSIGN}] 2024-11-11T12:43:32,951 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=122, ppid=121, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, UNASSIGN 2024-11-11T12:43:32,952 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:32,953 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:43:32,953 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; CloseRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:33,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-11T12:43:33,105 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:33,105 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(124): Close 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:33,105 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:43:33,105 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1681): Closing 99827bdf8e81fc8bb34d29fe73f0a358, disabling compactions & flushes 2024-11-11T12:43:33,105 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. after waiting 0 ms 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
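The procedure chain above (DisableTableProcedure pid=120, CloseTableRegionsProcedure pid=121, TransitRegionStateProcedure UNASSIGN pid=122, CloseRegionProcedure pid=123) is what the master runs once a client asks for a disable; from the client side it is a single Admin call. A minimal sketch of that teardown follows; the final deleteTable step is an assumption for completeness, the log only shows the disable.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DisableAndDrop {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("TestAcidGuarantees");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Blocks until the master's DisableTableProcedure (pid=120 above) completes:
          // every region is unassigned and hbase:meta records the table as DISABLED.
          if (admin.isTableEnabled(table)) {
            admin.disableTable(table);
          }
          // Dropping the table afterwards is an assumption; the log only shows the disable.
          admin.deleteTable(table);
        }
      }
    }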
2024-11-11T12:43:33,106 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(2837): Flushing 99827bdf8e81fc8bb34d29fe73f0a358 3/3 column families, dataSize=20.13 KB heapSize=53.48 KB 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=A 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=B 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 99827bdf8e81fc8bb34d29fe73f0a358, store=C 2024-11-11T12:43:33,106 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:33,111 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d4a4366c23a041fd928fb3ae552b0ff9_99827bdf8e81fc8bb34d29fe73f0a358 is 50, key is test_row_0/A:col10/1731329012873/Put/seqid=0 2024-11-11T12:43:33,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742293_1469 (size=9914) 2024-11-11T12:43:33,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-11T12:43:33,516 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:33,520 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d4a4366c23a041fd928fb3ae552b0ff9_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d4a4366c23a041fd928fb3ae552b0ff9_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:33,521 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/de0e965d908445838b911b54ab159671, store: [table=TestAcidGuarantees family=A region=99827bdf8e81fc8bb34d29fe73f0a358] 2024-11-11T12:43:33,521 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/de0e965d908445838b911b54ab159671 is 175, key is test_row_0/A:col10/1731329012873/Put/seqid=0 2024-11-11T12:43:33,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742294_1470 (size=22561) 2024-11-11T12:43:33,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-11T12:43:33,925 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=424, memsize=6.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/de0e965d908445838b911b54ab159671 2024-11-11T12:43:33,931 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/96b63a44212a47e0895a78b50af94b99 is 50, key is test_row_0/B:col10/1731329012873/Put/seqid=0 2024-11-11T12:43:33,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742295_1471 (size=9857) 2024-11-11T12:43:34,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-11T12:43:34,334 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=424 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/96b63a44212a47e0895a78b50af94b99 2024-11-11T12:43:34,340 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/cc5a8a920b5640068861f2dc0cb8d0a5 is 50, key is test_row_0/C:col10/1731329012873/Put/seqid=0 2024-11-11T12:43:34,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742296_1472 (size=9857) 2024-11-11T12:43:34,744 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.71 KB at sequenceid=424 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/cc5a8a920b5640068861f2dc0cb8d0a5 2024-11-11T12:43:34,747 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/A/de0e965d908445838b911b54ab159671 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/de0e965d908445838b911b54ab159671 2024-11-11T12:43:34,751 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/de0e965d908445838b911b54ab159671, entries=100, sequenceid=424, filesize=22.0 K 2024-11-11T12:43:34,752 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/B/96b63a44212a47e0895a78b50af94b99 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/96b63a44212a47e0895a78b50af94b99 2024-11-11T12:43:34,755 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/96b63a44212a47e0895a78b50af94b99, entries=100, sequenceid=424, filesize=9.6 K 2024-11-11T12:43:34,756 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/.tmp/C/cc5a8a920b5640068861f2dc0cb8d0a5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/cc5a8a920b5640068861f2dc0cb8d0a5 2024-11-11T12:43:34,759 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/cc5a8a920b5640068861f2dc0cb8d0a5, entries=100, sequenceid=424, filesize=9.6 K 2024-11-11T12:43:34,760 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(3040): Finished flush of dataSize ~20.13 KB/20610, heapSize ~53.44 KB/54720, currentSize=0 B/0 for 99827bdf8e81fc8bb34d29fe73f0a358 in 1654ms, sequenceid=424, compaction requested=false 2024-11-11T12:43:34,760 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] regionserver.HStore(2316): Moving the files 
[hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1] to archive 2024-11-11T12:43:34,761 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
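The HFileArchiver records that follow show each compacted A-family store file being renamed from the region's data directory to the mirrored path under archive/. As an illustrative aid only, the move pattern those records describe can be sketched with the public Hadoop FileSystem API; the class and method names below are hypothetical, the root URI and file path are copied from the log above, and this is not HBase's actual HFileArchiver implementation.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveMoveSketch {
        // Sketch only: moves one store file from <root>/data/... to the mirrored
        // <root>/archive/data/... location, creating the target directory first.
        static void archiveStoreFile(FileSystem fs, Path root, String relativeStoreFile)
                throws IOException {
            Path source = new Path(root, relativeStoreFile);
            Path target = new Path(new Path(root, "archive"), relativeStoreFile);
            if (!fs.mkdirs(target.getParent())) {
                throw new IOException("Could not create " + target.getParent());
            }
            if (!fs.rename(source, target)) {
                throw new IOException("Rename failed: " + source + " -> " + target);
            }
        }

        public static void main(String[] args) throws IOException {
            Configuration conf = new Configuration();
            // Root and store-file path taken from the log; any HDFS root with the same layout works.
            Path root = new Path(
                "hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18");
            FileSystem fs = root.getFileSystem(conf);
            archiveStoreFile(fs, root,
                "data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929");
        }
    }

Each "Archived from FileableStoreFile, <data path> to <archive path>" DEBUG line below corresponds to one such rename.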
2024-11-11T12:43:34,762 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e11dde57a90a45c8810b69e350c75929 2024-11-11T12:43:34,763 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/244408076476443c924d96038a770766 2024-11-11T12:43:34,765 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/5dd93ba1f79846aa8ce1ef8fceeea0cd 2024-11-11T12:43:34,766 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/b2e73d4f27e7408192442ffefab9e554 2024-11-11T12:43:34,767 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ce3a6b4507144d768fb0598965f34ea8 2024-11-11T12:43:34,769 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c4c849d3afe742bda1cc7145dc8338f5 2024-11-11T12:43:34,770 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/575fec6ad1ab4f7f80ac076599e665bb 2024-11-11T12:43:34,771 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f4649293a31143369b0864c5962283de 2024-11-11T12:43:34,772 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/e5cbf8b78d044e3dba29d31a58b55f17 2024-11-11T12:43:34,773 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d0a71c153c62478fbe9d2afbec6cb96b 2024-11-11T12:43:34,774 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/9bcd88e2c7034ad8ba4fb7d0cf290cfc 2024-11-11T12:43:34,775 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/ece9e00889fc43799295c7c33a42211c 2024-11-11T12:43:34,776 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/f0f7c412b6274afca4f780c1d6f8b2e5 2024-11-11T12:43:34,777 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/004d68e483ad41c49148899b6a8febac 2024-11-11T12:43:34,779 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1bdc5966b88543a79879e43561117c35 2024-11-11T12:43:34,780 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/1b7325cfd679428ebb39104eb566e505 2024-11-11T12:43:34,781 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/d8726f5b3f6d4fedbefb2578be663e8e 2024-11-11T12:43:34,782 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/4975a4ed65ec4ea18c54c88686ab1972 2024-11-11T12:43:34,785 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/80465fde17d74958be3749c1a66af50c 2024-11-11T12:43:34,786 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8a92042a0ddb4485b05d357f4d65e72d 2024-11-11T12:43:34,787 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/6f005f383fdd4629adfae50a9d4657e5 2024-11-11T12:43:34,788 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/78d89f44015642d4a3cb54c113167c17 2024-11-11T12:43:34,789 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/fb958e804f91420cb1c32846a9d68874 2024-11-11T12:43:34,790 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/a61cc9dc0d0a4b30b13704aeec2df4fb 2024-11-11T12:43:34,791 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/46e3f9cf727c4050984484e1b891637e 2024-11-11T12:43:34,792 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c49c02d33d644eef89095d5251e33557 2024-11-11T12:43:34,793 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/101610660be54680b09e6e5726790334 2024-11-11T12:43:34,794 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/3e44fb2847cd430f894f7afeaee43da4 2024-11-11T12:43:34,794 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/535c00e95db4495b94d61061b027f84b 2024-11-11T12:43:34,795 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/8223453147204462ab37e78dc3bc29b1 2024-11-11T12:43:34,796 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/66f34f4b4b0144d0b192d38cbe7b953c, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/0920785fe52c4d8c8b7e822c003a4a4a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/b35e5c07e1d54864b289c294ce9352bb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/c3f8203c435646c19aff0a4d28b1676d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/43b48fedfae54b04913cf5c4144b694d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bec61211c65946e38a3addc193d4178c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/da98e255f64d4728a8593b01ae0da76f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/45f06f59a88a4085b32c17f7e2c941b7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342] to archive 2024-11-11T12:43:34,797 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T12:43:34,798 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/550498b38c064ae8b3361f4c237e5731 2024-11-11T12:43:34,798 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1d777de6bdec4b6388566065ca466a70 2024-11-11T12:43:34,799 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/1f2203ef17f4480da1ad14db506efb47 2024-11-11T12:43:34,800 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/66f34f4b4b0144d0b192d38cbe7b953c to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/66f34f4b4b0144d0b192d38cbe7b953c 2024-11-11T12:43:34,801 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4f214173249243a6aeeff5debc473489 2024-11-11T12:43:34,802 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bd5567cb4d3a432cbcd373cba4c11f70 2024-11-11T12:43:34,803 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/0920785fe52c4d8c8b7e822c003a4a4a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/0920785fe52c4d8c8b7e822c003a4a4a 2024-11-11T12:43:34,803 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/694b27b0b3d74dd39f2393a2946381a7 2024-11-11T12:43:34,804 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/270debd3692442d5bade68f8336bf76a 2024-11-11T12:43:34,805 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/b35e5c07e1d54864b289c294ce9352bb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/b35e5c07e1d54864b289c294ce9352bb 2024-11-11T12:43:34,806 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/212cf74028e44fca91870f4ef64ea34f 2024-11-11T12:43:34,807 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/d8cdbae8c862489cb9304e7831a31a0f 2024-11-11T12:43:34,808 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/c3f8203c435646c19aff0a4d28b1676d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/c3f8203c435646c19aff0a4d28b1676d 2024-11-11T12:43:34,809 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/eadbf14c611e4d8e9299252f8a97270b 2024-11-11T12:43:34,810 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8abaa8dea7344c81a0e84df827b0cb99 2024-11-11T12:43:34,811 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/4de76553e9c14e0391d4fa78181f7b5a 2024-11-11T12:43:34,812 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/43b48fedfae54b04913cf5c4144b694d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/43b48fedfae54b04913cf5c4144b694d 2024-11-11T12:43:34,813 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/3e61ac387dd040a8bec40ab9b7a14fc9 2024-11-11T12:43:34,814 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/8be456b3f3124fe7a88727915bf086a3 2024-11-11T12:43:34,815 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bec61211c65946e38a3addc193d4178c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bec61211c65946e38a3addc193d4178c 2024-11-11T12:43:34,816 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ae79f5b983874dc3a01aeb0b51509047 2024-11-11T12:43:34,817 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/ed9d930cb20f44c1b9521d7c448e3190 2024-11-11T12:43:34,818 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/cf6225490fef4dd59d704995f5fa44fa 2024-11-11T12:43:34,818 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/da98e255f64d4728a8593b01ae0da76f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/da98e255f64d4728a8593b01ae0da76f 2024-11-11T12:43:34,819 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/f84dcadc79dd49aaa0ccb4f8f7f03370 2024-11-11T12:43:34,820 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/50e0868a5e16447daf0654ae2b398d35 2024-11-11T12:43:34,821 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/45f06f59a88a4085b32c17f7e2c941b7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/45f06f59a88a4085b32c17f7e2c941b7 2024-11-11T12:43:34,822 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/9f8427eef6f540a5a1f30d490a6bd869 2024-11-11T12:43:34,823 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/155e7887a9b949ed87659ab862ed30fb 2024-11-11T12:43:34,824 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/bfa169967cdd4d3cadfa09ad0410b342 2024-11-11T12:43:34,825 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ba7fb8a85cda493f942ed77b87ec7b62, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e1505516907497db5dfdf18e33ed715, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fe9783b7f541420ebe755db4f143a66d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/590e2f8a81e64058b4d98ca64b32008e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/2d0ea1b82ef14297bee859667293e160, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/e7ca2f350d8e40b5b8dce30e6d1cea0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7] to archive 2024-11-11T12:43:34,826 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
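The C-family files listed above are archived below with the same data-to-archive move. When reading a log like this after the fact, one way to confirm that every file named in a "Moving the files [...] to archive" record actually landed under archive/ is a small existence check; the helper below is hypothetical (not HBase code) and assumes the same root layout as the paths in the log.

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveCheckSketch {
        // Sketch only: returns true when every relative data path has a counterpart
        // under <root>/archive/ and the original data-side file is gone.
        static boolean allArchived(FileSystem fs, Path root, List<String> relativeStoreFiles)
                throws IOException {
            for (String rel : relativeStoreFiles) {
                Path data = new Path(root, rel);
                Path archived = new Path(new Path(root, "archive"), rel);
                if (fs.exists(data) || !fs.exists(archived)) {
                    return false;
                }
            }
            return true;
        }
    }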
2024-11-11T12:43:34,827 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/948b88661a54467cabb435c9fd7ecde8 2024-11-11T12:43:34,828 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/f353a3a5bb674cf8ab5d355f8c1f81ff 2024-11-11T12:43:34,829 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/324460ff5be7429ca320b34ec9d68254 2024-11-11T12:43:34,830 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ba7fb8a85cda493f942ed77b87ec7b62 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ba7fb8a85cda493f942ed77b87ec7b62 2024-11-11T12:43:34,831 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/249666bb872b49d6a3555689be3011b6 2024-11-11T12:43:34,831 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/76ef4c3e075348098c6557651a470f9b 2024-11-11T12:43:34,832 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8ca5b1d51e7f4fabaa1a1252a2f79d73 2024-11-11T12:43:34,833 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e1505516907497db5dfdf18e33ed715 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e1505516907497db5dfdf18e33ed715 2024-11-11T12:43:34,834 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3971f271b5de4aaa82ae066eebda12a7 2024-11-11T12:43:34,835 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/a7b869c6ee3b4e5bb53453e4b7fb0813 2024-11-11T12:43:34,836 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/140c7b9116f741048f700e0a89a88ff5 2024-11-11T12:43:34,837 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fe9783b7f541420ebe755db4f143a66d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fe9783b7f541420ebe755db4f143a66d 2024-11-11T12:43:34,837 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/3b5f83300512427cb37bfee3d4792524 2024-11-11T12:43:34,840 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/07e370351fb748479519bee456f5a266 2024-11-11T12:43:34,842 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/65b7669277a449eb8553e5c50218bc89 2024-11-11T12:43:34,843 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/590e2f8a81e64058b4d98ca64b32008e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/590e2f8a81e64058b4d98ca64b32008e 2024-11-11T12:43:34,844 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/fb742f2230ec4e66b0c5104593052ea9 2024-11-11T12:43:34,845 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/09bce5af97354beca8649b8fd1b23eec 2024-11-11T12:43:34,845 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/657988a4c4d54b4f956aeeac925379cd 2024-11-11T12:43:34,846 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/b05b864d7ed84dfba21e2419cec3a727 2024-11-11T12:43:34,847 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/2d0ea1b82ef14297bee859667293e160 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/2d0ea1b82ef14297bee859667293e160 2024-11-11T12:43:34,848 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1deb957874d742b18fbf37134a939af7 2024-11-11T12:43:34,849 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/9c22ddb5a88e409f86b4bc7b002ca5fa 2024-11-11T12:43:34,850 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/8e8973b542c6439a8d1072c13e2ac27f 2024-11-11T12:43:34,851 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/e7ca2f350d8e40b5b8dce30e6d1cea0e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/e7ca2f350d8e40b5b8dce30e6d1cea0e 2024-11-11T12:43:34,852 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/789485d2fc494b5ebe3607411fd3ae72 2024-11-11T12:43:34,854 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/bde5d6ad344f49b2b623e6e91c1f4ca9 2024-11-11T12:43:34,855 DEBUG [StoreCloser-TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7 2024-11-11T12:43:34,863 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits/427.seqid, newMaxSeqId=427, maxSeqId=4 2024-11-11T12:43:34,863 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358. 
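The StoreCloser entries above relocate each compacted store file from the region's data directory to the parallel location under archive/. A minimal sketch of that data/ -> archive/data/ path mapping as it appears in these lines, using Hadoop's Path type; this is an illustration of the directory layout only, not HBase's internal HFileArchiver API:

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
        // Illustrative only: mirrors the data/ -> archive/data/ layout visible in the
        // HFileArchiver lines; the real move is performed internally by HBase.
        static Path toArchivePath(Path rootDir, Path storeFile) {
            String relative = storeFile.toUri().getPath()
                    .substring(rootDir.toUri().getPath().length() + 1);
            return new Path(new Path(rootDir, "archive"), relative);
        }

        public static void main(String[] args) {
            Path root = new Path("hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18");
            Path store = new Path(root,
                "data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/ce951134150543b68aca62744b1b8fe7");
            // Prints the archive destination recorded in the log for this file.
            System.out.println(toArchivePath(root, store));
        }
    }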
2024-11-11T12:43:34,863 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] regionserver.HRegion(1635): Region close journal for 99827bdf8e81fc8bb34d29fe73f0a358: 2024-11-11T12:43:34,865 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=123}] handler.UnassignRegionHandler(170): Closed 99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:34,865 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=122 updating hbase:meta row=99827bdf8e81fc8bb34d29fe73f0a358, regionState=CLOSED 2024-11-11T12:43:34,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122 2024-11-11T12:43:34,868 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; CloseRegionProcedure 99827bdf8e81fc8bb34d29fe73f0a358, server=32e78532c8b1,44673,1731328897232 in 1.9130 sec 2024-11-11T12:43:34,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=122, resume processing ppid=121 2024-11-11T12:43:34,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, ppid=121, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=99827bdf8e81fc8bb34d29fe73f0a358, UNASSIGN in 1.9170 sec 2024-11-11T12:43:34,870 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120 2024-11-11T12:43:34,871 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9190 sec 2024-11-11T12:43:34,872 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329014871"}]},"ts":"1731329014871"} 2024-11-11T12:43:34,873 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:43:34,875 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:43:34,877 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9350 sec 2024-11-11T12:43:34,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-11T12:43:35,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=120 2024-11-11T12:43:35,048 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 120 completed 2024-11-11T12:43:35,049 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:43:35,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,050 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=124, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-11T12:43:35,050 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=124, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,052 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,054 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits] 2024-11-11T12:43:35,056 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c30c0d2a1de948ddacb2cd346300c86e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/c30c0d2a1de948ddacb2cd346300c86e 2024-11-11T12:43:35,057 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/de0e965d908445838b911b54ab159671 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/A/de0e965d908445838b911b54ab159671 2024-11-11T12:43:35,059 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/83db3a29e8c440b491aafb754c2fc368 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/83db3a29e8c440b491aafb754c2fc368 2024-11-11T12:43:35,060 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/96b63a44212a47e0895a78b50af94b99 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/B/96b63a44212a47e0895a78b50af94b99 2024-11-11T12:43:35,061 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1d6a918cbc5b48bb87a6e9f9decadd72 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/1d6a918cbc5b48bb87a6e9f9decadd72 2024-11-11T12:43:35,062 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/cc5a8a920b5640068861f2dc0cb8d0a5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/C/cc5a8a920b5640068861f2dc0cb8d0a5 2024-11-11T12:43:35,064 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits/427.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358/recovered.edits/427.seqid 2024-11-11T12:43:35,065 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,065 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:43:35,065 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:43:35,066 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-11T12:43:35,069 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111120212ee4849b48179e8061a9786dbb70_99827bdf8e81fc8bb34d29fe73f0a358 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111120212ee4849b48179e8061a9786dbb70_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,070 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113e851205cdcc484aaaac1d6c5e479bf8_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411113e851205cdcc484aaaac1d6c5e479bf8_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,071 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115e1eaf4d4e5c45baa7ddf824676b6b69_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411115e1eaf4d4e5c45baa7ddf824676b6b69_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,072 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116387cad272a845139578f10484637705_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116387cad272a845139578f10484637705_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,073 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116f3a8227bfea44679ce79ab2e5501dc3_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411116f3a8227bfea44679ce79ab2e5501dc3_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,074 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111789822a902594134bb1c36692d16109e_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111789822a902594134bb1c36692d16109e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,075 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111798d319fd56e4f83913dbbe1df76ed61_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111798d319fd56e4f83913dbbe1df76ed61_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,076 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117e3be542fc1249ba832008a44f20ccd2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117e3be542fc1249ba832008a44f20ccd2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,077 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111183b7cffea39447128d201e1c92383e53_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111183b7cffea39447128d201e1c92383e53_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,078 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118d2fea575cf94ea9b3f9aef30fff4353_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118d2fea575cf94ea9b3f9aef30fff4353_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,079 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111197ba68f4021345338c38136962a8f5ef_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111197ba68f4021345338c38136962a8f5ef_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,080 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111199ead5202df24a32acae03d77fac5107_99827bdf8e81fc8bb34d29fe73f0a358 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111199ead5202df24a32acae03d77fac5107_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,081 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119ad013b9e98a4319b241e97cdc0d5785_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119ad013b9e98a4319b241e97cdc0d5785_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,081 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d6a519395174d45a3e3f5c2a94c1696_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d6a519395174d45a3e3f5c2a94c1696_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,082 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b5ae1632ba6b499b9be6c4d044f5902b_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b5ae1632ba6b499b9be6c4d044f5902b_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,083 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111bcac93fa22484b9aaa72a4e7e2906704_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111bcac93fa22484b9aaa72a4e7e2906704_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,083 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c26c0c88dc2a44429bea0de8a5fcf1a2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c26c0c88dc2a44429bea0de8a5fcf1a2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,084 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c277645f5f494bf19dd587ec27dffced_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c277645f5f494bf19dd587ec27dffced_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,085 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c94c38f4c1f94a26aacc977dbf3bd21e_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111c94c38f4c1f94a26aacc977dbf3bd21e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,086 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ce24b40c10394451a8a66fefc5550633_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ce24b40c10394451a8a66fefc5550633_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,086 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d4a4366c23a041fd928fb3ae552b0ff9_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d4a4366c23a041fd928fb3ae552b0ff9_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,087 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e8cf033c03154a059fdc68c643c541d2_99827bdf8e81fc8bb34d29fe73f0a358 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e8cf033c03154a059fdc68c643c541d2_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,088 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111f5dd0a4f4efb46dd9303ae2cbb229f9e_99827bdf8e81fc8bb34d29fe73f0a358 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111f5dd0a4f4efb46dd9303ae2cbb229f9e_99827bdf8e81fc8bb34d29fe73f0a358 2024-11-11T12:43:35,088 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:43:35,090 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=124, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,091 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:43:35,093 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-11T12:43:35,094 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=124, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,094 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-11T12:43:35,094 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731329015094"}]},"ts":"9223372036854775807"} 2024-11-11T12:43:35,096 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:43:35,096 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 99827bdf8e81fc8bb34d29fe73f0a358, NAME => 'TestAcidGuarantees,,1731328982490.99827bdf8e81fc8bb34d29fe73f0a358.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:43:35,096 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
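The mob files archived just above all share the 32-character prefix d41d8cd98f00b204e9800998ecf8427e, which is the MD5 digest of an empty byte sequence; that is consistent with the region's empty start key (STARTKEY => '') being hashed into the mob file name. A quick self-contained check of that digest value (the naming convention itself is an inference from these file names, not something this log states):

    import java.math.BigInteger;
    import java.security.MessageDigest;

    public class MobNamePrefixCheck {
        public static void main(String[] args) throws Exception {
            // MD5 of an empty byte array; compare with the prefix of the archived mob file names.
            byte[] digest = MessageDigest.getInstance("MD5").digest(new byte[0]);
            System.out.println(String.format("%032x", new BigInteger(1, digest)));
            // d41d8cd98f00b204e9800998ecf8427e
        }
    }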
2024-11-11T12:43:35,096 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731329015096"}]},"ts":"9223372036854775807"} 2024-11-11T12:43:35,098 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:43:35,100 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=124, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,101 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 51 msec 2024-11-11T12:43:35,151 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-11-11T12:43:35,151 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 124 completed 2024-11-11T12:43:35,161 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobScanAtomicity Thread=234 (was 238), OpenFileDescriptor=455 (was 456), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=871 (was 904), ProcessCount=9 (was 9), AvailableMemoryMB=2363 (was 3270) 2024-11-11T12:43:35,169 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=234, OpenFileDescriptor=455, MaxFileDescriptor=1048576, SystemLoadAverage=871, ProcessCount=9, AvailableMemoryMB=2362 2024-11-11T12:43:35,170 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
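The DISABLE (procId 120) and DELETE (procId 124) operations reported as completed above are driven by ordinary client calls against the HBase Admin API. A hedged sketch of that client-side sequence using the public 2.x API; the connection setup is an assumption (the log does not show the test's client code), and only the table name and the disable-before-delete ordering come from the log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName table = TableName.valueOf("TestAcidGuarantees");
                if (admin.tableExists(table)) {
                    // A table must be disabled before it can be deleted; these two calls
                    // correspond to the DisableTableProcedure and DeleteTableProcedure above.
                    admin.disableTable(table);
                    admin.deleteTable(table);
                }
            }
        }
    }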
2024-11-11T12:43:35,171 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:43:35,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:43:35,172 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:43:35,172 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:35,172 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 125 2024-11-11T12:43:35,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:43:35,173 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:35,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742297_1473 (size=960) 2024-11-11T12:43:35,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:35,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:35,580 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:43:35,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742298_1474 (size=53) 2024-11-11T12:43:35,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing bb21a7c6e49c779e06f46670f1405ab7, disabling compactions & flushes 2024-11-11T12:43:35,985 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. after waiting 0 ms 2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:35,985 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
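The CREATE request logged above re-creates TestAcidGuarantees with families A, B and C (VERSIONS => '1', BLOOMFILTER => 'ROW') and the table attribute 'hbase.hregion.compacting.memstore.type' => 'BASIC'. A sketch of an equivalent descriptor built with the public builder API; it sets only the attributes explicitly called out in the log line and leaves everything else at its default, so it is an approximation rather than the test's actual code:

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        static TableDescriptor descriptor() {
            TableDescriptorBuilder builder = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("TestAcidGuarantees"))
                // Table attribute from the TABLE_ATTRIBUTES/METADATA section of the log.
                .setValue("hbase.hregion.compacting.memstore.type", "BASIC");
            for (String family : new String[] { "A", "B", "C" }) {
                ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
                    .newBuilder(Bytes.toBytes(family))
                    .setMaxVersions(1)                 // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                    .build();
                builder.setColumnFamily(cf);
            }
            return builder.build();
        }

        static void create(Admin admin) throws IOException {
            admin.createTable(descriptor()); // drives the CreateTableProcedure (pid=125) above
        }
    }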
2024-11-11T12:43:35,985 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:35,986 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:43:35,987 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731329015986"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731329015986"}]},"ts":"1731329015986"} 2024-11-11T12:43:35,987 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 2024-11-11T12:43:35,988 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:43:35,988 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329015988"}]},"ts":"1731329015988"} 2024-11-11T12:43:35,989 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:43:35,993 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, ASSIGN}] 2024-11-11T12:43:35,994 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, ASSIGN 2024-11-11T12:43:35,994 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:43:36,145 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=bb21a7c6e49c779e06f46670f1405ab7, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:36,146 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; OpenRegionProcedure bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:43:36,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:36,298 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:36,300 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:36,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7285): Opening region: {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:43:36,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:43:36,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7327): checking encryption for bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,301 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(7330): checking classloading for bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,302 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,304 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:36,304 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb21a7c6e49c779e06f46670f1405ab7 columnFamilyName A 2024-11-11T12:43:36,304 DEBUG [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:36,304 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(327): Store=bb21a7c6e49c779e06f46670f1405ab7/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:36,305 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,306 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:36,306 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb21a7c6e49c779e06f46670f1405ab7 columnFamilyName B 2024-11-11T12:43:36,306 DEBUG [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:36,306 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(327): Store=bb21a7c6e49c779e06f46670f1405ab7/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:36,306 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,307 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:43:36,308 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region bb21a7c6e49c779e06f46670f1405ab7 columnFamilyName C 2024-11-11T12:43:36,308 DEBUG [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:43:36,308 INFO [StoreOpener-bb21a7c6e49c779e06f46670f1405ab7-1 {}] regionserver.HStore(327): Store=bb21a7c6e49c779e06f46670f1405ab7/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:43:36,308 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:36,309 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,309 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,310 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:43:36,312 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1085): writing seq id for bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:36,313 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:43:36,314 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1102): Opened bb21a7c6e49c779e06f46670f1405ab7; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73840125, jitterRate=0.10030360519886017}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:43:36,315 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegion(1001): Region open journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:36,316 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., pid=127, masterSystemTime=1731329016297 2024-11-11T12:43:36,317 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:36,317 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=127}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
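
(Editor's note, not part of the captured log.) The entries above show the TestAcidGuarantees region being opened with three column families A/B/C backed by CompactingMemStore (BASIC in-memory compaction), and the entries that follow show the test client creating the table, issuing puts against rows like test_row_0/A:col10, requesting a table flush (FlushTableProcedure, pid=128), and then hitting RegionTooBusyException ("Over memstore limit=512.0 K") while the flush is in progress. The sketch below is a minimal, illustrative client-side equivalent of that sequence, assuming the standard HBase 2.x Java client API; the class name, row count, and retry/backoff parameters are hypothetical and only the table, family, and column names are taken from the log.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class AcidGuaranteesClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();  // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {

          TableName name = TableName.valueOf("TestAcidGuarantees");

          // Three column families A/B/C with BASIC in-memory compaction,
          // which is what produces the CompactingMemStore lines in the log above.
          TableDescriptorBuilder table = TableDescriptorBuilder.newBuilder(name);
          for (String family : new String[] {"A", "B", "C"}) {
            table.setColumnFamily(
                ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(family))
                    .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
                    .build());
          }
          if (!admin.tableExists(name)) {
            admin.createTable(table.build());
          }

          // Write some rows, backing off when the region reports
          // RegionTooBusyException ("Over memstore limit"), as in the warnings below.
          try (Table t = conn.getTable(name)) {
            for (int i = 0; i < 100; i++) {
              Put put = new Put(Bytes.toBytes("test_row_" + i));
              put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value-" + i));
              putWithBackoff(t, put);
            }
          }

          // Ask the master to flush the table: the client-side counterpart of the
          // FlushTableProcedure (pid=128) seen further down in the log.
          admin.flush(name);
        }
      }

      // Illustrative retry helper; the stock client also retries internally and may
      // surface the failure wrapped in a RetriesExhaustedWithDetailsException instead.
      private static void putWithBackoff(Table t, Put put) throws IOException, InterruptedException {
        long backoffMs = 100;
        for (int attempt = 0; attempt < 5; attempt++) {
          try {
            t.put(put);
            return;
          } catch (RegionTooBusyException e) {
            Thread.sleep(backoffMs);  // memstore above the blocking limit; wait for the flush to catch up
            backoffMs *= 2;
          }
        }
        throw new IOException("region still too busy after retries");
      }
    }

The "Over memstore limit=512.0 K" threshold in the warnings is the region's memstore blocking limit (flush size times the block multiplier, deliberately kept small in this test), so backing off and letting the in-flight flush complete is the expected client behaviour rather than an error in the test itself. The raw log resumes below.
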
2024-11-11T12:43:36,318 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=bb21a7c6e49c779e06f46670f1405ab7, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:36,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-11-11T12:43:36,320 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; OpenRegionProcedure bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 in 173 msec 2024-11-11T12:43:36,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-11-11T12:43:36,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, ASSIGN in 327 msec 2024-11-11T12:43:36,322 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:43:36,322 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329016322"}]},"ts":"1731329016322"} 2024-11-11T12:43:36,323 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:43:36,326 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=125, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:43:36,327 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 1.1550 sec 2024-11-11T12:43:37,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=125 2024-11-11T12:43:37,277 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 125 completed 2024-11-11T12:43:37,279 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x022a6e9f to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4c60eb7d 2024-11-11T12:43:37,283 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@695c2253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,284 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,286 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43270, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,286 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:43:37,287 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42186, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:43:37,289 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x32c12a30 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@79b10416 2024-11-11T12:43:37,292 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7177efc9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,293 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5ef40578 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2f142b04 2024-11-11T12:43:37,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61d38088, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,296 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x032bb71c to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@de9f076 2024-11-11T12:43:37,300 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7043f683, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,301 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x06bc0f7c to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4414259d 2024-11-11T12:43:37,306 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b0c2472, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,307 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1b8b6e04 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7ed69825 2024-11-11T12:43:37,310 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34b30c39, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,311 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11193a0c to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d672ed2 2024-11-11T12:43:37,319 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f7c40ba, compressor=null, tcpKeepAlive=true, 
tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,320 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7861b162 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7cf40102 2024-11-11T12:43:37,323 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41b0e7b6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,324 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x154f0f85 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@496fe03f 2024-11-11T12:43:37,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f2423f3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,332 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x008a917b to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3652e74d 2024-11-11T12:43:37,335 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@184771cf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,336 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x054c2725 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2405c04e 2024-11-11T12:43:37,339 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76f0408, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:43:37,342 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:37,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees 2024-11-11T12:43:37,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:37,343 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:37,343 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=128, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:37,343 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:37,344 DEBUG [hconnection-0x20fb0dfe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,345 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43280, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,348 DEBUG [hconnection-0x35b4cd7f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,349 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43288, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,352 DEBUG [hconnection-0x2aeb0800-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,353 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43302, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:37,358 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:37,359 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:37,360 DEBUG [hconnection-0x43cc4d43-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,361 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43316, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,363 DEBUG [hconnection-0xce54205-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,364 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43328, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,374 WARN 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329077374, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,375 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329077375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,375 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,375 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329077375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,380 DEBUG [hconnection-0x6c2f3258-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,381 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43344, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,382 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329077382, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,384 DEBUG [hconnection-0x58a5ec77-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,385 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43360, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,385 DEBUG [hconnection-0x4746177f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,386 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,387 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329077387, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,391 DEBUG [hconnection-0x2f98c4cf-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,392 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d8a00173f9574c8f9e132ded7dfc9479 is 50, key is test_row_0/A:col10/1731329017354/Put/seqid=0 2024-11-11T12:43:37,392 DEBUG [hconnection-0x3b17e29f-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:43:37,392 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43384, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,393 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:43392, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:43:37,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742299_1475 (size=12001) 2024-11-11T12:43:37,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:37,477 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329077476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329077476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,478 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 10 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329077476, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,483 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329077483, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,488 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329077488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,495 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,495 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:37,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:37,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,496 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,496 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,496 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:37,647 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,648 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:37,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:37,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,648 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,648 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,684 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 12 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329077681, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,688 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329077686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,690 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329077686, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329077684, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 6 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329077689, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,802 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,802 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:37,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:37,802 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,803 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,803 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,816 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d8a00173f9574c8f9e132ded7dfc9479 2024-11-11T12:43:37,849 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a872c7cfb1e64f89b29795744e1abece is 50, key is test_row_0/B:col10/1731329017354/Put/seqid=0 2024-11-11T12:43:37,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742300_1476 (size=12001) 2024-11-11T12:43:37,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:37,955 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,957 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:37,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
as already flushing 2024-11-11T12:43:37,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:37,958 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,958 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:37,996 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 14 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329077992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:37,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329077997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:37,999 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329077997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,000 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,000 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 8 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329077997, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,001 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329078000, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,112 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,113 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:38,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:38,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:38,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:38,113 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,113 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,254 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a872c7cfb1e64f89b29795744e1abece 2024-11-11T12:43:38,265 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:38,268 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:38,269 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] handler.RSProcedureHandler(58): pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=129 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=129 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:38,291 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/ba67adc9fc4c4485a3f931d41cdb1ee0 is 50, key is test_row_0/C:col10/1731329017354/Put/seqid=0 2024-11-11T12:43:38,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742301_1477 (size=12001) 2024-11-11T12:43:38,301 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=12 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/ba67adc9fc4c4485a3f931d41cdb1ee0 2024-11-11T12:43:38,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d8a00173f9574c8f9e132ded7dfc9479 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479 2024-11-11T12:43:38,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479, entries=150, sequenceid=12, filesize=11.7 K 2024-11-11T12:43:38,325 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a872c7cfb1e64f89b29795744e1abece as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece 2024-11-11T12:43:38,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece, entries=150, sequenceid=12, filesize=11.7 K 2024-11-11T12:43:38,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/ba67adc9fc4c4485a3f931d41cdb1ee0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0 2024-11-11T12:43:38,338 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0, entries=150, sequenceid=12, filesize=11.7 K 2024-11-11T12:43:38,339 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=161.02 KB/164880 for bb21a7c6e49c779e06f46670f1405ab7 in 981ms, sequenceid=12, compaction requested=false 2024-11-11T12:43:38,339 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:38,422 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,422 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=129 2024-11-11T12:43:38,422 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:38,423 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:38,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:38,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8fe96b715b7b4c84a36b5aadc45c0ed6 is 50, key is test_row_0/A:col10/1731329017373/Put/seqid=0 2024-11-11T12:43:38,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:38,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742302_1478 (size=12001) 2024-11-11T12:43:38,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:38,505 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:38,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329078508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,515 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329078508, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,516 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329078511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329078511, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,517 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 18 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329078512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,616 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329078615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329078619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329078619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329078619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,625 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 20 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329078620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,822 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329078819, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,827 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329078824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329078824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,828 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:38,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329078824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:38,833 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:43:38,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329078830, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:43:38,858 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8fe96b715b7b4c84a36b5aadc45c0ed6
2024-11-11T12:43:38,869 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9e7e70e3a49447b28389a836567ebaec is 50, key is test_row_0/B:col10/1731329017373/Put/seqid=0
2024-11-11T12:43:38,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742303_1479 (size=12001)
2024-11-11T12:43:38,878 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9e7e70e3a49447b28389a836567ebaec
2024-11-11T12:43:38,898 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/84c574406a8943e6bfdaebac5112da20 is 50, key is test_row_0/C:col10/1731329017373/Put/seqid=0
2024-11-11T12:43:38,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742304_1480 (size=12001)
2024-11-11T12:43:39,135 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:43:39,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329079129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:43:39,137 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329079132, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,138 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329079133, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,146 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,146 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329079140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,165 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:43:39,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329079160, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:43:39,328 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=39 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/84c574406a8943e6bfdaebac5112da20
2024-11-11T12:43:39,354 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8fe96b715b7b4c84a36b5aadc45c0ed6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6
2024-11-11T12:43:39,372 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6, entries=150, sequenceid=39, filesize=11.7 K
2024-11-11T12:43:39,373 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9e7e70e3a49447b28389a836567ebaec as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec
2024-11-11T12:43:39,386 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec, entries=150, sequenceid=39, filesize=11.7 K
2024-11-11T12:43:39,398 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/84c574406a8943e6bfdaebac5112da20 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20
2024-11-11T12:43:39,422 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20, entries=150, sequenceid=39, filesize=11.7 K
2024-11-11T12:43:39,423 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for bb21a7c6e49c779e06f46670f1405ab7 in 1001ms, sequenceid=39, compaction requested=false
2024-11-11T12:43:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.
2024-11-11T12:43:39,423 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=129}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=129
2024-11-11T12:43:39,430 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=129
2024-11-11T12:43:39,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128
2024-11-11T12:43:39,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=129, resume processing ppid=128
2024-11-11T12:43:39,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, ppid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0890 sec
2024-11-11T12:43:39,466 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=128, table=TestAcidGuarantees in 2.1230 sec
2024-11-11T12:43:39,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7
2024-11-11T12:43:39,659 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-11T12:43:39,664 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A
2024-11-11T12:43:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B
2024-11-11T12:43:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C
2024-11-11T12:43:39,665 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:39,678 ERROR [LeaseRenewer:jenkins.hfs.0@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins.hfs.0@localhost:42421,5,FailOnTimeoutGroup] died
java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null
    at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-11T12:43:39,682 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/48a6efacb62642f0b01421ba9638563a is 50, key is test_row_0/A:col10/1731329019659/Put/seqid=0
2024-11-11T12:43:39,689 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742305_1481 (size=16681)
2024-11-11T12:43:39,722 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329079715, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,723 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329079716, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329079717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,724 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329079717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,725 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329079717, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,830 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329079824, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329079826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,831 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329079827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,832 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329079828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:39,832 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:39,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329079829, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,041 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329080035, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,042 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329080036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329080036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,043 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329080036, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,052 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329080044, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,090 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/48a6efacb62642f0b01421ba9638563a 2024-11-11T12:43:40,149 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/1e9179df896a4d4a8e258adfa540a56e is 50, key is test_row_0/B:col10/1731329019659/Put/seqid=0 2024-11-11T12:43:40,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742306_1482 (size=12001) 2024-11-11T12:43:40,344 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329080344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329080344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,349 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329080345, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,351 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329080349, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,357 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329080355, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,415 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:43:40,573 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/1e9179df896a4d4a8e258adfa540a56e 2024-11-11T12:43:40,618 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/1e90c5b81e644ff085e35c13b62ad16e is 50, key is test_row_0/C:col10/1731329019659/Put/seqid=0 2024-11-11T12:43:40,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742307_1483 (size=12001) 2024-11-11T12:43:40,677 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/1e90c5b81e644ff085e35c13b62ad16e 2024-11-11T12:43:40,693 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/48a6efacb62642f0b01421ba9638563a as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a 2024-11-11T12:43:40,698 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a, entries=250, sequenceid=52, filesize=16.3 K 2024-11-11T12:43:40,699 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/1e9179df896a4d4a8e258adfa540a56e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e 2024-11-11T12:43:40,704 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e, entries=150, sequenceid=52, filesize=11.7 K 2024-11-11T12:43:40,705 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/1e90c5b81e644ff085e35c13b62ad16e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e 2024-11-11T12:43:40,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e, entries=150, sequenceid=52, filesize=11.7 K 2024-11-11T12:43:40,711 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bb21a7c6e49c779e06f46670f1405ab7 in 1052ms, sequenceid=52, compaction requested=true 2024-11-11T12:43:40,711 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:40,711 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:40,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:40,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:40,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:40,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:40,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:40,712 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:40,712 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40683 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:40,712 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:40,713 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:40,713 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=39.7 K 2024-11-11T12:43:40,713 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d8a00173f9574c8f9e132ded7dfc9479, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1731329017350 2024-11-11T12:43:40,713 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8fe96b715b7b4c84a36b5aadc45c0ed6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731329017372 2024-11-11T12:43:40,714 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 48a6efacb62642f0b01421ba9638563a, keycount=250, bloomtype=ROW, size=16.3 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329018508 2024-11-11T12:43:40,716 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:40,724 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:40,724 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:40,724 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] 
regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:40,724 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=35.2 K 2024-11-11T12:43:40,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a872c7cfb1e64f89b29795744e1abece, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1731329017350 2024-11-11T12:43:40,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9e7e70e3a49447b28389a836567ebaec, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731329017372 2024-11-11T12:43:40,726 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e9179df896a4d4a8e258adfa540a56e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329019646 2024-11-11T12:43:40,729 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#399 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:40,729 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3fbdd66064564903ba371ce4434f864d is 50, key is test_row_0/A:col10/1731329019659/Put/seqid=0 2024-11-11T12:43:40,738 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#400 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:40,738 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9152c670f593481485fd83a14cb993d2 is 50, key is test_row_0/B:col10/1731329019659/Put/seqid=0 2024-11-11T12:43:40,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742308_1484 (size=12104) 2024-11-11T12:43:40,758 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3fbdd66064564903ba371ce4434f864d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3fbdd66064564903ba371ce4434f864d 2024-11-11T12:43:40,763 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 3fbdd66064564903ba371ce4434f864d(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:40,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:40,764 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329020711; duration=0sec 2024-11-11T12:43:40,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:40,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:40,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:40,765 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36003 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:40,765 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:40,765 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:40,765 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=35.2 K 2024-11-11T12:43:40,766 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba67adc9fc4c4485a3f931d41cdb1ee0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=12, earliestPutTs=1731329017350 2024-11-11T12:43:40,767 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 84c574406a8943e6bfdaebac5112da20, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=39, earliestPutTs=1731329017372 2024-11-11T12:43:40,768 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e90c5b81e644ff085e35c13b62ad16e, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329019646 2024-11-11T12:43:40,778 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#401 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:40,778 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7e4e1b4d6a514b5f996a9522d60322dc is 50, key is test_row_0/C:col10/1731329019659/Put/seqid=0 2024-11-11T12:43:40,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742309_1485 (size=12104) 2024-11-11T12:43:40,804 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9152c670f593481485fd83a14cb993d2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9152c670f593481485fd83a14cb993d2 2024-11-11T12:43:40,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742310_1486 (size=12104) 2024-11-11T12:43:40,828 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into 9152c670f593481485fd83a14cb993d2(size=11.8 K), total size for store is 11.8 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:40,828 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:40,828 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329020711; duration=0sec 2024-11-11T12:43:40,828 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:40,828 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:40,829 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7e4e1b4d6a514b5f996a9522d60322dc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7e4e1b4d6a514b5f996a9522d60322dc 2024-11-11T12:43:40,841 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into 7e4e1b4d6a514b5f996a9522d60322dc(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:40,841 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:40,841 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329020712; duration=0sec 2024-11-11T12:43:40,841 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:40,841 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:40,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:40,851 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:40,851 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:40,868 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/17d8120645be419593766fd7590ee8f5 is 50, key is test_row_0/A:col10/1731329019715/Put/seqid=0 2024-11-11T12:43:40,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329080868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329080868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,874 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,874 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329080868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329080868, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,875 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329080870, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742311_1487 (size=14341) 2024-11-11T12:43:40,886 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/17d8120645be419593766fd7590ee8f5 2024-11-11T12:43:40,905 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a786f38b60ad4d74995d5e6824939bb0 is 50, key is test_row_0/B:col10/1731329019715/Put/seqid=0 2024-11-11T12:43:40,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742312_1488 (size=12001) 2024-11-11T12:43:40,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329080976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,979 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329080976, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,981 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329080978, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:40,987 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:40,987 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329080981, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,184 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329081180, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,186 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,186 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329081181, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,193 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329081193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 45 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329081193, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,332 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a786f38b60ad4d74995d5e6824939bb0 2024-11-11T12:43:41,375 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/4dbdd1647f614ad38e793ee9cb2a5c81 is 50, key is test_row_0/C:col10/1731329019715/Put/seqid=0 2024-11-11T12:43:41,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742313_1489 (size=12001) 2024-11-11T12:43:41,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-11-11T12:43:41,464 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 128 completed 2024-11-11T12:43:41,472 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:41,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees 2024-11-11T12:43:41,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:41,486 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:41,487 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=130, 
state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:41,487 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:41,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329081488, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,493 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,493 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329081489, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329081498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,522 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:41,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329081516, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:41,639 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,644 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:41,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:41,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,644 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] 
at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:41,805 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=79 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/4dbdd1647f614ad38e793ee9cb2a5c81 2024-11-11T12:43:41,816 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,817 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:41,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,817 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,818 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
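The repeated RegionTooBusyException entries above are the region blocking writes once its memstore passes the blocking threshold, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, and which HRegion.checkResources enforces in the stack traces shown. The 512.0 K limit in this run would follow, for example, from a 128 K flush size with the default multiplier of 4; those exact values are an assumption, since the test configuration is not part of this log. A minimal sketch of such a configuration, under those assumed values:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class SmallMemstoreConfig {
        public static Configuration create() {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values: a 128 K flush size with the default block multiplier of 4
            // yields the 512 K blocking limit reported in the log above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);
            return conf;
        }
    }

Once the memstore of region bb21a7c6e49c779e06f46670f1405ab7 exceeds that product, further mutations are rejected until the in-progress flush drains it, which is exactly the pattern of WARN/DEBUG pairs filling this stretch of the log.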
2024-11-11T12:43:41,818 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,824 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/17d8120645be419593766fd7590ee8f5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5 2024-11-11T12:43:41,828 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5, entries=200, sequenceid=79, filesize=14.0 K 2024-11-11T12:43:41,829 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a786f38b60ad4d74995d5e6824939bb0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0 2024-11-11T12:43:41,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0, entries=150, sequenceid=79, filesize=11.7 K 2024-11-11T12:43:41,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/4dbdd1647f614ad38e793ee9cb2a5c81 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81 2024-11-11T12:43:41,839 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81, entries=150, sequenceid=79, filesize=11.7 K 2024-11-11T12:43:41,841 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bb21a7c6e49c779e06f46670f1405ab7 in 990ms, sequenceid=79, compaction requested=false 2024-11-11T12:43:41,841 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:41,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:43:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:41,889 
DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:41,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:41,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:41,916 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/eb45d03598c74bf1912847c6708361b6 is 50, key is test_row_0/A:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:41,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742314_1490 (size=12001) 2024-11-11T12:43:41,961 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/eb45d03598c74bf1912847c6708361b6 2024-11-11T12:43:41,974 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:41,975 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:41,975 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:41,975 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,976 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:41,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/c3509f04ef3a4dd0ae92004d913cf311 is 50, key is test_row_0/B:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:42,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742315_1491 (size=12001) 2024-11-11T12:43:42,024 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329082009, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,025 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329082011, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,028 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329082021, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,033 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329082027, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,037 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329082030, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:42,130 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,131 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,131 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,131 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329082125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329082125, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,132 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
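The FlushTableProcedure activity above (pid=130 with region subprocedure pid=131) shows an administrative flush colliding with the memstore flush already running: the region answers "NOT flushing ... as already flushing", FlushRegionCallable fails with "Unable to complete flush", and the master re-dispatches pid=131 while the requesting client keeps asking "Checking to see if procedure is done pid=130". A minimal client-side sketch of the request that drives this, assuming the standard HBase 2.x Admin API and a default-configured connection:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTable {
        public static void main(String[] args) throws Exception {
            try (Connection connection = ConnectionFactory.createConnection();
                 Admin admin = connection.getAdmin()) {
                // Submits a flush-table procedure on the master (pid=130 above) and
                // waits for it to complete before returning.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }

Admin.flush does not return until the procedure finishes, which matches the HBaseAdmin$TableFuture polling visible earlier in this log for the previous flush (procId: 128 completed).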
2024-11-11T12:43:42,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329082130, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,142 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329082140, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,284 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:42,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:42,284 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,285 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,338 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329082334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329082334, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,339 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329082335, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,347 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329082343, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,420 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/c3509f04ef3a4dd0ae92004d913cf311 2024-11-11T12:43:42,441 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eee4effec147420cb2d6e18a7153b3eb is 50, key is test_row_0/C:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:42,444 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,445 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:42,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,445 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,445 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742316_1492 (size=12001) 2024-11-11T12:43:42,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:42,597 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,597 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:42,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:42,598 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,598 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,640 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329082640, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,645 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329082641, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,646 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329082642, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,652 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:42,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329082649, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,749 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,749 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:42,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,750 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] handler.RSProcedureHandler(58): pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:42,750 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=131 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=131 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:42,861 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=92 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eee4effec147420cb2d6e18a7153b3eb 2024-11-11T12:43:42,872 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/eb45d03598c74bf1912847c6708361b6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6 2024-11-11T12:43:42,876 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6, entries=150, sequenceid=92, filesize=11.7 K 2024-11-11T12:43:42,877 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/c3509f04ef3a4dd0ae92004d913cf311 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311 2024-11-11T12:43:42,882 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311, entries=150, sequenceid=92, filesize=11.7 K 2024-11-11T12:43:42,882 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eee4effec147420cb2d6e18a7153b3eb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb 2024-11-11T12:43:42,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb, entries=150, sequenceid=92, filesize=11.7 K 2024-11-11T12:43:42,888 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bb21a7c6e49c779e06f46670f1405ab7 in 1000ms, sequenceid=92, compaction requested=true 2024-11-11T12:43:42,888 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:42,889 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 
store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:42,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:42,889 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:42,889 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:42,889 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 38446 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:42,890 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:42,890 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,890 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3fbdd66064564903ba371ce4434f864d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=37.5 K 2024-11-11T12:43:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:42,890 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3fbdd66064564903ba371ce4434f864d, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329019646 2024-11-11T12:43:42,890 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:42,890 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 
after considering 1 permutations with 1 in ratio 2024-11-11T12:43:42,890 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 17d8120645be419593766fd7590ee8f5, keycount=200, bloomtype=ROW, size=14.0 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731329019715 2024-11-11T12:43:42,891 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:42,891 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,891 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9152c670f593481485fd83a14cb993d2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=35.3 K 2024-11-11T12:43:42,891 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting eb45d03598c74bf1912847c6708361b6, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:42,892 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9152c670f593481485fd83a14cb993d2, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329019646 2024-11-11T12:43:42,893 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a786f38b60ad4d74995d5e6824939bb0, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731329019715 2024-11-11T12:43:42,893 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c3509f04ef3a4dd0ae92004d913cf311, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:42,901 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#408 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:42,902 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3f8a53777c114e939778c04548312574 is 50, key is test_row_0/A:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:42,902 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:42,902 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=131 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,903 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:42,903 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:42,915 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#409 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:42,916 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9244b3e521d2498d842b74775e7dec1d is 50, key is test_row_0/B:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:42,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/fddd528aa5884936bee502e348ba64a8 is 50, key is test_row_0/A:col10/1731329022006/Put/seqid=0 2024-11-11T12:43:42,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742317_1493 (size=12207) 2024-11-11T12:43:42,939 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3f8a53777c114e939778c04548312574 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3f8a53777c114e939778c04548312574 2024-11-11T12:43:42,944 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 3f8a53777c114e939778c04548312574(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:42,944 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:42,944 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329022888; duration=0sec 2024-11-11T12:43:42,944 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:42,944 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:42,944 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:42,945 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:42,945 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:42,945 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:42,945 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7e4e1b4d6a514b5f996a9522d60322dc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=35.3 K 2024-11-11T12:43:42,946 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7e4e1b4d6a514b5f996a9522d60322dc, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329019646 2024-11-11T12:43:42,946 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 4dbdd1647f614ad38e793ee9cb2a5c81, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=79, earliestPutTs=1731329019715 2024-11-11T12:43:42,946 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting eee4effec147420cb2d6e18a7153b3eb, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:42,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:44919 is added to blk_1073742318_1494 (size=12207) 2024-11-11T12:43:42,955 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#411 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:42,957 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/da95abf61bd44245999c6de3884a1ec7 is 50, key is test_row_0/C:col10/1731329021882/Put/seqid=0 2024-11-11T12:43:42,957 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/9244b3e521d2498d842b74775e7dec1d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9244b3e521d2498d842b74775e7dec1d 2024-11-11T12:43:42,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742319_1495 (size=12001) 2024-11-11T12:43:42,963 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into 9244b3e521d2498d842b74775e7dec1d(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:42,963 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:42,963 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329022889; duration=0sec 2024-11-11T12:43:42,963 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:42,963 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:42,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742320_1496 (size=12207) 2024-11-11T12:43:42,969 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/da95abf61bd44245999c6de3884a1ec7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/da95abf61bd44245999c6de3884a1ec7 2024-11-11T12:43:42,974 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into da95abf61bd44245999c6de3884a1ec7(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:42,974 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:42,974 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329022890; duration=0sec 2024-11-11T12:43:42,974 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:42,974 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:43,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:43,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:43,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329083079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,147 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329083143, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,148 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329083147, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,153 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,154 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329083150, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,159 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,159 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329083156, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,191 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329083190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,364 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/fddd528aa5884936bee502e348ba64a8 2024-11-11T12:43:43,378 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/80b06921640b40239a6dd4bb30fe1c5c is 50, key is test_row_0/B:col10/1731329022006/Put/seqid=0 2024-11-11T12:43:43,396 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329083394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742321_1497 (size=12001) 2024-11-11T12:43:43,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:43,697 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:43,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329083697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:43,815 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/80b06921640b40239a6dd4bb30fe1c5c 2024-11-11T12:43:43,842 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7bfacc10f4674aca93b6ca9b8308dffa is 50, key is test_row_0/C:col10/1731329022006/Put/seqid=0 2024-11-11T12:43:43,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742322_1498 (size=12001) 2024-11-11T12:43:43,896 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=115 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7bfacc10f4674aca93b6ca9b8308dffa 2024-11-11T12:43:43,928 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/fddd528aa5884936bee502e348ba64a8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8 2024-11-11T12:43:43,956 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8, entries=150, sequenceid=115, filesize=11.7 K 2024-11-11T12:43:43,960 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/80b06921640b40239a6dd4bb30fe1c5c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c 2024-11-11T12:43:43,966 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c, entries=150, sequenceid=115, filesize=11.7 K 2024-11-11T12:43:43,967 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7bfacc10f4674aca93b6ca9b8308dffa as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa 2024-11-11T12:43:43,990 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa, entries=150, sequenceid=115, filesize=11.7 K 2024-11-11T12:43:43,991 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bb21a7c6e49c779e06f46670f1405ab7 in 1088ms, sequenceid=115, compaction requested=false 2024-11-11T12:43:43,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:43,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:43,991 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=131}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=131 2024-11-11T12:43:43,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=131 2024-11-11T12:43:43,997 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=131, resume processing ppid=130 2024-11-11T12:43:43,998 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.5090 sec 2024-11-11T12:43:43,999 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=130, table=TestAcidGuarantees in 2.5260 sec 2024-11-11T12:43:44,152 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:44,152 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:43:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:44,152 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:44,153 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:44,164 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/44fcc0e125a94195a557bbac604e7cc5 is 50, key is test_row_0/A:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:44,191 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329084187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,192 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,192 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329084189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329084190, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,198 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,199 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329084192, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742323_1499 (size=9757) 2024-11-11T12:43:44,216 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329084213, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,298 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329084293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 63 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329084296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,300 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 66 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329084296, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329084300, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,508 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329084504, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329084505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,509 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 68 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329084505, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,514 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329084512, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,611 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/44fcc0e125a94195a557bbac604e7cc5 2024-11-11T12:43:44,651 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/e7aacf346f15438dbe7b2c4892f84f07 is 50, key is test_row_0/B:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:44,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742324_1500 (size=9757) 2024-11-11T12:43:44,812 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329084811, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,818 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329084813, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,823 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329084817, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:44,827 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:44,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329084822, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,081 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/e7aacf346f15438dbe7b2c4892f84f07 2024-11-11T12:43:45,100 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b22b74314a114643b4c6174c07170146 is 50, key is test_row_0/C:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:45,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742325_1501 (size=9757) 2024-11-11T12:43:45,134 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=132 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b22b74314a114643b4c6174c07170146 2024-11-11T12:43:45,140 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/44fcc0e125a94195a557bbac604e7cc5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5 2024-11-11T12:43:45,148 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5, entries=100, sequenceid=132, filesize=9.5 K 2024-11-11T12:43:45,149 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/e7aacf346f15438dbe7b2c4892f84f07 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07 2024-11-11T12:43:45,155 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07, entries=100, sequenceid=132, filesize=9.5 K 2024-11-11T12:43:45,164 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b22b74314a114643b4c6174c07170146 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146 2024-11-11T12:43:45,176 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146, entries=100, sequenceid=132, filesize=9.5 K 2024-11-11T12:43:45,184 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for bb21a7c6e49c779e06f46670f1405ab7 in 1032ms, sequenceid=132, compaction requested=true 2024-11-11T12:43:45,184 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:45,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:45,184 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:45,185 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:45,185 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:45,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:45,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:45,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:45,185 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:45,188 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:45,188 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): 
bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:45,188 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:45,188 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3f8a53777c114e939778c04548312574, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.2 K 2024-11-11T12:43:45,188 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:45,188 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:45,189 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:45,189 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9244b3e521d2498d842b74775e7dec1d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.2 K 2024-11-11T12:43:45,189 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3f8a53777c114e939778c04548312574, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:45,192 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9244b3e521d2498d842b74775e7dec1d, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:45,192 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting fddd528aa5884936bee502e348ba64a8, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731329022006 2024-11-11T12:43:45,193 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 44fcc0e125a94195a557bbac604e7cc5, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329023078 2024-11-11T12:43:45,193 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 80b06921640b40239a6dd4bb30fe1c5c, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731329022006 2024-11-11T12:43:45,195 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e7aacf346f15438dbe7b2c4892f84f07, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329023078 2024-11-11T12:43:45,214 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#417 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:45,215 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/24fb282847af49ed95e58b919e5966b1 is 50, key is test_row_0/B:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:45,219 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#418 average throughput is 2.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:45,220 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3160c30e79144c0abfe685bf0b28de38 is 50, key is test_row_0/A:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:45,235 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:45,235 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:45,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742326_1502 (size=12409) 2024-11-11T12:43:45,258 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/24fb282847af49ed95e58b919e5966b1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/24fb282847af49ed95e58b919e5966b1 2024-11-11T12:43:45,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2d128d3548b84f4089afbef04bae09b3 is 50, key is test_row_0/A:col10/1731329024190/Put/seqid=0 2024-11-11T12:43:45,264 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into 24fb282847af49ed95e58b919e5966b1(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:45,264 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:45,264 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329025184; duration=0sec 2024-11-11T12:43:45,264 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:45,264 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:45,264 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:45,265 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33965 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:45,265 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:45,265 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:45,265 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/da95abf61bd44245999c6de3884a1ec7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.2 K 2024-11-11T12:43:45,266 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting da95abf61bd44245999c6de3884a1ec7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=92, earliestPutTs=1731329020856 2024-11-11T12:43:45,266 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7bfacc10f4674aca93b6ca9b8308dffa, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=115, earliestPutTs=1731329022006 2024-11-11T12:43:45,267 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b22b74314a114643b4c6174c07170146, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329023078 2024-11-11T12:43:45,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is 
added to blk_1073742327_1503 (size=12409) 2024-11-11T12:43:45,279 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#420 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:45,280 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/f3a9a09b86a142f295d3780bde9923ea is 50, key is test_row_0/C:col10/1731329024151/Put/seqid=0 2024-11-11T12:43:45,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742328_1504 (size=14541) 2024-11-11T12:43:45,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742329_1505 (size=12409) 2024-11-11T12:43:45,323 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/f3a9a09b86a142f295d3780bde9923ea as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/f3a9a09b86a142f295d3780bde9923ea 2024-11-11T12:43:45,328 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into f3a9a09b86a142f295d3780bde9923ea(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:45,328 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:45,328 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329025185; duration=0sec 2024-11-11T12:43:45,328 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:45,328 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:45,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329085325, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,331 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329085326, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 72 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329085328, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,337 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329085329, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,337 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329085331, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,441 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329085439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,449 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329085444, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,457 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329085456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=130 2024-11-11T12:43:45,595 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 130 completed 2024-11-11T12:43:45,609 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:45,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees 2024-11-11T12:43:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:45,624 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:45,628 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=132, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:45,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:45,644 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy 
due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329085643, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,664 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329085656, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,673 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:45,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329085671, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,717 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2d128d3548b84f4089afbef04bae09b3 2024-11-11T12:43:45,720 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3160c30e79144c0abfe685bf0b28de38 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3160c30e79144c0abfe685bf0b28de38 2024-11-11T12:43:45,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:45,731 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/461a5146e71c49d08f47fd7aa348a852 is 50, key is test_row_0/B:col10/1731329024190/Put/seqid=0 2024-11-11T12:43:45,734 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 3160c30e79144c0abfe685bf0b28de38(size=12.1 K), total size for store is 12.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:45,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:45,734 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329025184; duration=0sec 2024-11-11T12:43:45,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:45,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:45,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742330_1506 (size=12151) 2024-11-11T12:43:45,770 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/461a5146e71c49d08f47fd7aa348a852 2024-11-11T12:43:45,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,789 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-11T12:43:45,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:45,789 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:45,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:45,790 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] handler.RSProcedureHandler(58): pid=133 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:45,790 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=133 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=133 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:45,803 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c65513df1f404fbc9022112ff6737eef is 50, key is test_row_0/C:col10/1731329024190/Put/seqid=0 2024-11-11T12:43:45,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742331_1507 (size=12151) 2024-11-11T12:43:45,848 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c65513df1f404fbc9022112ff6737eef 2024-11-11T12:43:45,869 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2d128d3548b84f4089afbef04bae09b3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3 2024-11-11T12:43:45,878 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3, entries=200, sequenceid=155, filesize=14.2 K 2024-11-11T12:43:45,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/461a5146e71c49d08f47fd7aa348a852 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852 2024-11-11T12:43:45,888 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852, entries=150, sequenceid=155, filesize=11.9 K 2024-11-11T12:43:45,892 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c65513df1f404fbc9022112ff6737eef as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef 2024-11-11T12:43:45,896 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef, entries=150, sequenceid=155, filesize=11.9 K 2024-11-11T12:43:45,897 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bb21a7c6e49c779e06f46670f1405ab7 in 663ms, sequenceid=155, compaction requested=false 2024-11-11T12:43:45,897 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:45,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:45,946 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:45,949 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=133 2024-11-11T12:43:45,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:45,949 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:43:45,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:45,949 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:45,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:45,950 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:45,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:45,968 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing 
TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:45,986 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/b090613e08784b8198a1a53211a3ab35 is 50, key is test_row_0/A:col10/1731329025277/Put/seqid=0 2024-11-11T12:43:46,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742332_1508 (size=12151) 2024-11-11T12:43:46,064 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329086058, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,065 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329086060, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,065 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329086061, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,167 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329086166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329086166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,168 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,168 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329086166, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:46,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329086340, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,346 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329086344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,370 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,371 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329086369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329086369, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,372 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,372 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329086371, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,440 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/b090613e08784b8198a1a53211a3ab35 2024-11-11T12:43:46,458 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/aab5e0ae3b904da4831b89e17e24156a is 50, key is test_row_0/B:col10/1731329025277/Put/seqid=0 2024-11-11T12:43:46,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742333_1509 (size=12151) 2024-11-11T12:43:46,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329086674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,680 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329086674, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,681 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:46,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329086677, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:46,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:46,903 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/aab5e0ae3b904da4831b89e17e24156a 2024-11-11T12:43:46,911 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/cf3b63739620498b82254002ab926fd9 is 50, key is test_row_0/C:col10/1731329025277/Put/seqid=0 2024-11-11T12:43:46,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742334_1510 (size=12151) 2024-11-11T12:43:46,935 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=171 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/cf3b63739620498b82254002ab926fd9 2024-11-11T12:43:46,940 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/b090613e08784b8198a1a53211a3ab35 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35 2024-11-11T12:43:46,945 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35, entries=150, sequenceid=171, filesize=11.9 K 2024-11-11T12:43:46,946 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/aab5e0ae3b904da4831b89e17e24156a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a 2024-11-11T12:43:46,952 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a, entries=150, sequenceid=171, filesize=11.9 K 2024-11-11T12:43:46,953 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/cf3b63739620498b82254002ab926fd9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9 2024-11-11T12:43:46,958 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9, entries=150, sequenceid=171, filesize=11.9 K 2024-11-11T12:43:46,959 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=140.89 KB/144270 for bb21a7c6e49c779e06f46670f1405ab7 in 1009ms, sequenceid=171, compaction requested=true 2024-11-11T12:43:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
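The entries above are the server-side half of an Admin-initiated table flush: FlushTableProcedure pid=132 fans out a FlushRegionProcedure (pid=133) to the region server, which first reports "Unable to complete flush ... as already flushing" while MemStoreFlusher.0 is still running, then succeeds on the retry once the A, B and C store files are committed. The interleaved RegionTooBusyException warnings are the write path pushing clients back while the region's memstore is over the 512.0 K blocking limit. The sketch below is a minimal, hypothetical HBase 2.x client illustrating both sides of that exchange; the table name, row, column family and qualifier are taken from the log, while the class name, retry count and backoff values are invented for illustration and are not part of this test.

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

/** Hypothetical client-side sketch; not part of TestAcidGuarantees. */
public class FlushAndBackoffSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // The 512.0 K blocking limit in the log comes from region-server settings
        // (roughly hbase.hregion.memstore.flush.size * hbase.hregion.memstore.block.multiplier),
        // configured in hbase-site.xml on the servers rather than on the client.
        TableName tn = TableName.valueOf("TestAcidGuarantees");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tn)) {

            // Client-side counterpart of the FLUSH operation logged as
            // FlushTableProcedure pid=132/134: the master fans it out to one
            // FlushRegionProcedure per region of the table.
            admin.flush(tn);

            // Write path: retry with exponential backoff while the region is
            // rejecting mutations because its memstore is over the blocking limit.
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 10; attempt++) {
                try {
                    table.put(put);
                    break; // mutation accepted
                } catch (IOException e) {
                    // In this run the root cause is RegionTooBusyException
                    // ("Over memstore limit=512.0 K"); the stock client retries it
                    // internally first and may surface it wrapped in a
                    // RetriesExhaustedException. Back off and let the flush drain.
                    Thread.sleep(backoffMs);
                    backoffMs = Math.min(backoffMs * 2, 5_000);
                }
            }
        }
    }
}

The explicit backoff mirrors what the log shows: writes are rejected only for the roughly one second it takes the flush procedure to write and commit the A/B/C store files, after which some writes land again (the region's in-memory size grows between flushes) and the next flush cycle (pid=134) begins.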
2024-11-11T12:43:46,959 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=133}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=133 2024-11-11T12:43:46,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=133 2024-11-11T12:43:46,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-11-11T12:43:46,962 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.3320 sec 2024-11-11T12:43:46,969 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=132, table=TestAcidGuarantees in 1.3590 sec 2024-11-11T12:43:47,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:47,198 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:47,198 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:47,219 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6d58241da9eb42d0b0974011e3f4722a is 50, key is test_row_0/A:col10/1731329027192/Put/seqid=0 2024-11-11T12:43:47,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329087226, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 94 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329087225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,235 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329087227, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742335_1511 (size=16931) 2024-11-11T12:43:47,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329087336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,340 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329087336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,341 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329087336, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,543 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,543 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329087541, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,547 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329087545, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,552 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329087547, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,668 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6d58241da9eb42d0b0974011e3f4722a 2024-11-11T12:43:47,707 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b1f49007e7f346de8ab9a47370fe766a is 50, key is test_row_0/B:col10/1731329027192/Put/seqid=0 2024-11-11T12:43:47,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742336_1512 (size=12151) 2024-11-11T12:43:47,734 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b1f49007e7f346de8ab9a47370fe766a 2024-11-11T12:43:47,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=132 2024-11-11T12:43:47,741 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 132 completed 2024-11-11T12:43:47,742 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:47,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees 2024-11-11T12:43:47,744 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:47,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-11T12:43:47,745 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=134, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:47,745 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:47,754 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/8d424cba938c4b3f9ac46999c74e8b44 is 50, key is test_row_0/C:col10/1731329027192/Put/seqid=0 2024-11-11T12:43:47,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742337_1513 (size=12151) 2024-11-11T12:43:47,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-11T12:43:47,851 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329087846, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329087852, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,858 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:47,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329087855, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,896 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:47,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-11T12:43:47,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:47,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:47,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:47,897 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
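Editor's note on the repeated RegionTooBusyException entries above: they are raised by HRegion.checkResources when the region's memstore passes its blocking size, which in HBase is the per-region flush size multiplied by the block multiplier. The sketch below only labels those two standard settings; the concrete numbers are illustrative and are not taken from this test run (whatever produced the 512.0 K limit here is configured outside this excerpt).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only; the test's actual settings are not shown in this log.
    conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024);  // per-region flush trigger
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);       // writes block at multiplier x flush size
    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    // HRegion.checkResources rejects mutations with RegionTooBusyException above this size:
    System.out.println("blocking memstore size = " + (flushSize * multiplier / 1024) + " K");
  }
}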
2024-11-11T12:43:47,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:47,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:48,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-11T12:43:48,049 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,050 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-11T12:43:48,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:48,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:48,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:48,050 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] handler.RSProcedureHandler(58): pid=135 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:48,051 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=135 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:48,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=135 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
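Editor's note on the pid=135 failures above: FlushRegionCallable finds the region already mid-flush ("NOT flushing ... as already flushing"), reports the IOException back, and the master re-dispatches the callable until it succeeds. A minimal client-side sketch of the table-level flush the test driver issues (assuming a reachable cluster and default client configuration; not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Triggers a FlushTableProcedure like the pid=132/134 ones in this log;
      // the call returns once the procedure has completed on the master.
      admin.flush(TableName.valueOf("default", "TestAcidGuarantees"));
    }
  }
}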
2024-11-11T12:43:48,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=197 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/8d424cba938c4b3f9ac46999c74e8b44 2024-11-11T12:43:48,174 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6d58241da9eb42d0b0974011e3f4722a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a 2024-11-11T12:43:48,181 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a, entries=250, sequenceid=197, filesize=16.5 K 2024-11-11T12:43:48,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b1f49007e7f346de8ab9a47370fe766a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a 2024-11-11T12:43:48,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a, entries=150, sequenceid=197, filesize=11.9 K 2024-11-11T12:43:48,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/8d424cba938c4b3f9ac46999c74e8b44 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44 2024-11-11T12:43:48,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44, entries=150, sequenceid=197, filesize=11.9 K 2024-11-11T12:43:48,200 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=53.67 KB/54960 for bb21a7c6e49c779e06f46670f1405ab7 in 1002ms, sequenceid=197, compaction requested=true 2024-11-11T12:43:48,200 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:48,201 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, 
current under compaction store size is 1 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:48,201 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:48,201 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:48,203 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,203 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=135 2024-11-11T12:43:48,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:48,203 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:48,203 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:48,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:48,207 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 56032 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:48,207 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:48,207 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:48,207 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3160c30e79144c0abfe685bf0b28de38, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=54.7 K 2024-11-11T12:43:48,213 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:48,213 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:48,213 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3160c30e79144c0abfe685bf0b28de38, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329022011 2024-11-11T12:43:48,213 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
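Editor's note on the compaction selection above: ExploringCompactionPolicy reports choosing 4 of 4 eligible files with 16 blocking, and PressureAwareThroughputController caps the merge at 50.00 MB/second. The selection bounds come from the standard store-compaction settings sketched below; the values shown are the usual defaults, listed only to name the knobs, and this test may override them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionSelectionKnobs {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);        // fewest files considered for a minor compaction
    conf.setInt("hbase.hstore.compaction.max", 10);       // most files merged in one minor compaction
    conf.setInt("hbase.hstore.blockingStoreFiles", 16);   // the "16 blocking" figure in the selection log
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // size ratio used by ExploringCompactionPolicy
    System.out.println("min=" + conf.getInt("hbase.hstore.compaction.min", 3)
        + " max=" + conf.getInt("hbase.hstore.compaction.max", 10)
        + " blocking=" + conf.getInt("hbase.hstore.blockingStoreFiles", 16));
  }
}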
2024-11-11T12:43:48,213 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/24fb282847af49ed95e58b919e5966b1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=47.7 K 2024-11-11T12:43:48,220 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d128d3548b84f4089afbef04bae09b3, keycount=200, bloomtype=ROW, size=14.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731329024185 2024-11-11T12:43:48,220 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 24fb282847af49ed95e58b919e5966b1, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329022011 2024-11-11T12:43:48,227 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 461a5146e71c49d08f47fd7aa348a852, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731329024189 2024-11-11T12:43:48,227 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b090613e08784b8198a1a53211a3ab35, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731329025277 2024-11-11T12:43:48,228 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting aab5e0ae3b904da4831b89e17e24156a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731329025277 2024-11-11T12:43:48,229 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6d58241da9eb42d0b0974011e3f4722a, keycount=250, bloomtype=ROW, size=16.5 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026049 2024-11-11T12:43:48,229 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b1f49007e7f346de8ab9a47370fe766a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026056 2024-11-11T12:43:48,231 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/bed4079f101549b3a4b661ec74385a74 is 50, key is test_row_1/A:col10/1731329027224/Put/seqid=0 2024-11-11T12:43:48,276 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#430 average throughput is 3.28 MB/second, slept 0 
time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:48,277 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/104be03a28ee4f9a87505c32174e8dad is 50, key is test_row_0/A:col10/1731329027192/Put/seqid=0 2024-11-11T12:43:48,278 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#431 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:48,279 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b0679d4522994906a92827086ef8ba04 is 50, key is test_row_0/B:col10/1731329027192/Put/seqid=0 2024-11-11T12:43:48,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742338_1514 (size=9757) 2024-11-11T12:43:48,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742339_1515 (size=12595) 2024-11-11T12:43:48,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742340_1516 (size=12595) 2024-11-11T12:43:48,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-11T12:43:48,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:48,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:48,458 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329088445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329088445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329088446, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329088447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,460 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 82 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329088448, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329088560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,567 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329088561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329088561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329088561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,568 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329088565, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,695 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/bed4079f101549b3a4b661ec74385a74 2024-11-11T12:43:48,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/f79b13db358d43fc9308317c66bc64b9 is 50, key is test_row_1/B:col10/1731329027224/Put/seqid=0 2024-11-11T12:43:48,716 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b0679d4522994906a92827086ef8ba04 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b0679d4522994906a92827086ef8ba04 2024-11-11T12:43:48,725 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into b0679d4522994906a92827086ef8ba04(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
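Editor's note on the continuing "Over memstore limit" rejections: the writer threads keep hitting the blocking limit while the flush and compactions drain the memstore, and the HBase client normally retries this retryable IOException on its own, so the test's puts eventually land. Purely as an illustrative sketch of a writer that backs off explicitly instead of relying on the built-in retries (row and family names mirror the test's layout but are otherwise assumptions, and depending on retry settings the exception may arrive wrapped rather than directly):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffWriter {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.client.retries.number", 1); // keep built-in retries short so the busy signal surfaces
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("default", "TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"));
      put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
      for (int attempt = 1; attempt <= 5; attempt++) {
        try {
          table.put(put);
          break; // accepted once the memstore drops below the blocking limit
        } catch (IOException e) {
          boolean busy = e instanceof RegionTooBusyException
              || e.getCause() instanceof RegionTooBusyException;
          if (!busy || attempt == 5) {
            throw e; // not the busy-region case, or out of attempts: give up
          }
          Thread.sleep(200L * attempt); // back off while the flush/compaction catches up
        }
      }
    }
  }
}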
2024-11-11T12:43:48,725 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/104be03a28ee4f9a87505c32174e8dad as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/104be03a28ee4f9a87505c32174e8dad
2024-11-11T12:43:48,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:48,725 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=12, startTime=1731329028201; duration=0sec
2024-11-11T12:43:48,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-11T12:43:48,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B
2024-11-11T12:43:48,725 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking
2024-11-11T12:43:48,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742341_1517 (size=9757)
2024-11-11T12:43:48,727 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/f79b13db358d43fc9308317c66bc64b9
2024-11-11T12:43:48,729 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48862 starting at candidate #0 after considering 3 permutations with 3 in ratio
2024-11-11T12:43:48,729 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files)
2024-11-11T12:43:48,729 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.
2024-11-11T12:43:48,729 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/f3a9a09b86a142f295d3780bde9923ea, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=47.7 K
2024-11-11T12:43:48,730 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f3a9a09b86a142f295d3780bde9923ea, keycount=150, bloomtype=ROW, size=12.1 K, encoding=NONE, compression=NONE, seqNum=132, earliestPutTs=1731329022011
2024-11-11T12:43:48,731 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c65513df1f404fbc9022112ff6737eef, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1731329024189
2024-11-11T12:43:48,731 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting cf3b63739620498b82254002ab926fd9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1731329025277
2024-11-11T12:43:48,732 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d424cba938c4b3f9ac46999c74e8b44, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026056
2024-11-11T12:43:48,734 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 104be03a28ee4f9a87505c32174e8dad(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-11T12:43:48,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:48,734 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=12, startTime=1731329028200; duration=0sec
2024-11-11T12:43:48,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:43:48,734 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A
2024-11-11T12:43:48,737 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c622380714a44950bf9f4ddc78d11127 is 50, key is test_row_1/C:col10/1731329027224/Put/seqid=0
2024-11-11T12:43:48,741 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#434 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-11T12:43:48,741 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/062971a4d76d481199bc00b04ffcf5b2 is 50, key is test_row_0/C:col10/1731329027192/Put/seqid=0
2024-11-11T12:43:48,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742342_1518 (size=9757)
2024-11-11T12:43:48,766 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329088763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,771 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329088769, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,772 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329088770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,775 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329088771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,775 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:48,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329088772, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:48,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742343_1519 (size=12595) 2024-11-11T12:43:48,795 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/062971a4d76d481199bc00b04ffcf5b2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/062971a4d76d481199bc00b04ffcf5b2 2024-11-11T12:43:48,802 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into 062971a4d76d481199bc00b04ffcf5b2(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:48,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:48,802 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=12, startTime=1731329028201; duration=0sec
2024-11-11T12:43:48,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:43:48,802 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C
2024-11-11T12:43:48,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134
2024-11-11T12:43:49,069 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:43:49,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329089067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:43:49,075 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 133 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329089073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,079 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329089076, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,084 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329089080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,087 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329089082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,145 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=208 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c622380714a44950bf9f4ddc78d11127 2024-11-11T12:43:49,150 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/bed4079f101549b3a4b661ec74385a74 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74 2024-11-11T12:43:49,156 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74, entries=100, sequenceid=208, filesize=9.5 K 2024-11-11T12:43:49,157 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/f79b13db358d43fc9308317c66bc64b9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9 2024-11-11T12:43:49,162 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9, entries=100, sequenceid=208, filesize=9.5 K 2024-11-11T12:43:49,163 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/c622380714a44950bf9f4ddc78d11127 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127
2024-11-11T12:43:49,181 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127, entries=100, sequenceid=208, filesize=9.5 K
2024-11-11T12:43:49,182 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bb21a7c6e49c779e06f46670f1405ab7 in 979ms, sequenceid=208, compaction requested=false
2024-11-11T12:43:49,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:49,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.
2024-11-11T12:43:49,182 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=135}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=135
2024-11-11T12:43:49,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=135
2024-11-11T12:43:49,189 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=135, resume processing ppid=134
2024-11-11T12:43:49,189 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, ppid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4380 sec
2024-11-11T12:43:49,191 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=134, table=TestAcidGuarantees in 1.4480 sec
2024-11-11T12:43:49,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7
2024-11-11T12:43:49,575 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB
2024-11-11T12:43:49,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A
2024-11-11T12:43:49,575 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:49,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B
2024-11-11T12:43:49,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:43:49,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK
bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:49,576 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:49,580 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/721e278b88c74e298169a23ba2f7fc55 is 50, key is test_row_0/A:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,596 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329089589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,597 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329089590, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742344_1520 (size=12151) 2024-11-11T12:43:49,598 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/721e278b88c74e298169a23ba2f7fc55 2024-11-11T12:43:49,600 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329089592, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329089596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,601 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329089597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/6944fa711a724984a6858de46b5f96b9 is 50, key is test_row_0/B:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742345_1521 (size=12151) 2024-11-11T12:43:49,624 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/6944fa711a724984a6858de46b5f96b9 2024-11-11T12:43:49,646 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b46b7194b3784a6c8f95895287703cbd is 50, key is test_row_0/C:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742346_1522 (size=12151) 2024-11-11T12:43:49,655 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b46b7194b3784a6c8f95895287703cbd 2024-11-11T12:43:49,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/721e278b88c74e298169a23ba2f7fc55 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55
2024-11-11T12:43:49,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55, entries=150, sequenceid=237, filesize=11.9 K
2024-11-11T12:43:49,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/6944fa711a724984a6858de46b5f96b9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9
2024-11-11T12:43:49,681 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9, entries=150, sequenceid=237, filesize=11.9 K
2024-11-11T12:43:49,682 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b46b7194b3784a6c8f95895287703cbd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd
2024-11-11T12:43:49,690 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd, entries=150, sequenceid=237, filesize=11.9 K
2024-11-11T12:43:49,692 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for bb21a7c6e49c779e06f46670f1405ab7 in 116ms, sequenceid=237, compaction requested=true
2024-11-11T12:43:49,692 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7:
2024-11-11T12:43:49,692 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T12:43:49,695 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1
2024-11-11T12:43:49,695 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T12:43:49,695 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files)
2024-11-11T12:43:49,695 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.
2024-11-11T12:43:49,696 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/104be03a28ee4f9a87505c32174e8dad, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.7 K
2024-11-11T12:43:49,696 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 104be03a28ee4f9a87505c32174e8dad, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026056
2024-11-11T12:43:49,696 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting bed4079f101549b3a4b661ec74385a74, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731329027214
2024-11-11T12:43:49,696 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:43:49,697 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 721e278b88c74e298169a23ba2f7fc55, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445
2024-11-11T12:43:49,697 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-11T12:43:49,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2
2024-11-11T12:43:49,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-11T12:43:49,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3
2024-11-11T12:43:49,697 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0
2024-11-11T12:43:49,699 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-11T12:43:49,699 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files)
2024-11-11T12:43:49,699 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351):
Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:49,699 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b0679d4522994906a92827086ef8ba04, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.7 K 2024-11-11T12:43:49,699 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b0679d4522994906a92827086ef8ba04, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026056 2024-11-11T12:43:49,700 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f79b13db358d43fc9308317c66bc64b9, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731329027214 2024-11-11T12:43:49,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:49,702 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6944fa711a724984a6858de46b5f96b9, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445 2024-11-11T12:43:49,702 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:43:49,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:49,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:49,702 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:49,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:49,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:49,703 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:49,718 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#438 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:49,720 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/74e0f9e0362247d7acb0841450d6d1c1 is 50, key is test_row_0/A:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,722 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8b770bcad69b4c8893b234696f58c5e5 is 50, key is test_row_0/A:col10/1731329029585/Put/seqid=0 2024-11-11T12:43:49,724 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#440 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:49,724 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/befe2f39033c4203a9ae1142258cde78 is 50, key is test_row_0/B:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742348_1524 (size=12147) 2024-11-11T12:43:49,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329089755, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,762 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329089757, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329089758, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,774 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742349_1525 (size=12697) 2024-11-11T12:43:49,785 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/befe2f39033c4203a9ae1142258cde78 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/befe2f39033c4203a9ae1142258cde78 2024-11-11T12:43:49,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742347_1523 (size=12697) 2024-11-11T12:43:49,794 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into befe2f39033c4203a9ae1142258cde78(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:49,794 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:49,794 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329029696; duration=0sec 2024-11-11T12:43:49,794 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:49,794 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:49,794 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:49,795 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/74e0f9e0362247d7acb0841450d6d1c1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74e0f9e0362247d7acb0841450d6d1c1 2024-11-11T12:43:49,796 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 34503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:49,796 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:49,796 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:49,796 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/062971a4d76d481199bc00b04ffcf5b2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=33.7 K 2024-11-11T12:43:49,797 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 062971a4d76d481199bc00b04ffcf5b2, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=197, earliestPutTs=1731329026056 2024-11-11T12:43:49,798 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c622380714a44950bf9f4ddc78d11127, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=208, earliestPutTs=1731329027214 2024-11-11T12:43:49,798 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b46b7194b3784a6c8f95895287703cbd, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445 2024-11-11T12:43:49,802 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 74e0f9e0362247d7acb0841450d6d1c1(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:49,802 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:49,802 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329029692; duration=0sec 2024-11-11T12:43:49,803 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:49,803 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:49,808 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#441 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:49,809 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7aacbea74a74432491958c71cdabf3f6 is 50, key is test_row_0/C:col10/1731329029574/Put/seqid=0 2024-11-11T12:43:49,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742350_1526 (size=12697) 2024-11-11T12:43:49,836 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7aacbea74a74432491958c71cdabf3f6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7aacbea74a74432491958c71cdabf3f6 2024-11-11T12:43:49,841 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into 7aacbea74a74432491958c71cdabf3f6(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:49,841 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:49,841 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329029697; duration=0sec 2024-11-11T12:43:49,841 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:49,841 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:49,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=134 2024-11-11T12:43:49,850 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 134 completed 2024-11-11T12:43:49,854 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:49,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees 2024-11-11T12:43:49,860 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:49,861 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=136, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, 
locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:49,861 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:49,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:49,863 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329089863, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329089866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,870 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:49,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329089866, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:49,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:50,015 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,016 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-11T12:43:50,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:50,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:50,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:50,020 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:50,020 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:50,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:50,066 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329090066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329090071, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,076 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329090073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,146 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8b770bcad69b4c8893b234696f58c5e5 2024-11-11T12:43:50,153 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a7a049f896bd44faa68c8f6bf0068e46 is 50, key is test_row_0/B:col10/1731329029585/Put/seqid=0 2024-11-11T12:43:50,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:50,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742351_1527 (size=9757) 2024-11-11T12:43:50,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a7a049f896bd44faa68c8f6bf0068e46 2024-11-11T12:43:50,173 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,174 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-11T12:43:50,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:50,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:50,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:50,174 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] handler.RSProcedureHandler(58): pid=137 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:50,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=137 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:50,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=137 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:50,191 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/32e973207ce24050a8e0e8c81ef447ed is 50, key is test_row_0/C:col10/1731329029585/Put/seqid=0 2024-11-11T12:43:50,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742352_1528 (size=9757) 2024-11-11T12:43:50,218 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=248 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/32e973207ce24050a8e0e8c81ef447ed 2024-11-11T12:43:50,226 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/8b770bcad69b4c8893b234696f58c5e5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5 2024-11-11T12:43:50,236 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5, entries=150, sequenceid=248, filesize=11.9 K 2024-11-11T12:43:50,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/a7a049f896bd44faa68c8f6bf0068e46 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46 2024-11-11T12:43:50,249 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46, entries=100, sequenceid=248, filesize=9.5 K 2024-11-11T12:43:50,249 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/32e973207ce24050a8e0e8c81ef447ed as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed 2024-11-11T12:43:50,258 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed, entries=100, sequenceid=248, filesize=9.5 K 2024-11-11T12:43:50,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for bb21a7c6e49c779e06f46670f1405ab7 in 557ms, sequenceid=248, compaction requested=false 2024-11-11T12:43:50,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:50,326 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,328 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=137 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:50,329 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:50,329 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:50,349 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/65499c2c9796448bbddf8930221327bb is 50, key is test_row_0/A:col10/1731329029755/Put/seqid=0 2024-11-11T12:43:50,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:50,373 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:50,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742353_1529 (size=12301) 2024-11-11T12:43:50,397 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329090390, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,399 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,399 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329090394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,400 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,400 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329090396, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:50,500 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329090498, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,503 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329090501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,504 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329090501, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,609 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329090605, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329090617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,706 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329090703, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,711 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329090705, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,713 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:50,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329090712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:50,792 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/65499c2c9796448bbddf8930221327bb 2024-11-11T12:43:50,825 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/629c00a3acc44196bc340eb515b0ced9 is 50, key is test_row_0/B:col10/1731329029755/Put/seqid=0 2024-11-11T12:43:50,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742354_1530 (size=12301) 2024-11-11T12:43:50,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:51,012 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329091008, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,017 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329091014, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,021 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329091020, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,264 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/629c00a3acc44196bc340eb515b0ced9 2024-11-11T12:43:51,300 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d16ccf50c3794f09a9d939fdb43841f1 is 50, key is test_row_0/C:col10/1731329029755/Put/seqid=0 2024-11-11T12:43:51,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742355_1531 (size=12301) 2024-11-11T12:43:51,328 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=276 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d16ccf50c3794f09a9d939fdb43841f1 2024-11-11T12:43:51,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/65499c2c9796448bbddf8930221327bb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb 2024-11-11T12:43:51,345 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb, entries=150, sequenceid=276, filesize=12.0 K 2024-11-11T12:43:51,346 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/629c00a3acc44196bc340eb515b0ced9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9 2024-11-11T12:43:51,354 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9, entries=150, sequenceid=276, filesize=12.0 K 2024-11-11T12:43:51,355 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d16ccf50c3794f09a9d939fdb43841f1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1 2024-11-11T12:43:51,362 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1, entries=150, sequenceid=276, filesize=12.0 K 2024-11-11T12:43:51,364 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for bb21a7c6e49c779e06f46670f1405ab7 in 1034ms, sequenceid=276, compaction requested=true 2024-11-11T12:43:51,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:51,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:51,364 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=137}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=137 2024-11-11T12:43:51,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=137 2024-11-11T12:43:51,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=137, resume processing ppid=136 2024-11-11T12:43:51,387 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.5170 sec 2024-11-11T12:43:51,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=136, table=TestAcidGuarantees in 1.5330 sec 2024-11-11T12:43:51,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:51,530 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:43:51,531 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:51,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:51,542 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/cdc012975751491f8f7ae61d7144f7e6 is 50, key is test_row_0/A:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:51,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742356_1532 (size=12297) 2024-11-11T12:43:51,571 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/cdc012975751491f8f7ae61d7144f7e6 2024-11-11T12:43:51,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/8833bb137bdf4b699bd60fe780f583bc is 50, key is test_row_0/B:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:51,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding 
memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 169 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329091601, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,606 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 157 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329091602, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,607 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329091603, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742357_1533 (size=9857) 2024-11-11T12:43:51,620 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/8833bb137bdf4b699bd60fe780f583bc 2024-11-11T12:43:51,654 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/318a0df74846429fadae6f2e2fb9a5a2 is 50, key is test_row_0/C:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742358_1534 (size=9857) 2024-11-11T12:43:51,711 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 159 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329091709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,712 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 171 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329091709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,713 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329091709, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,915 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 161 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329091913, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329091914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,916 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:51,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 173 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329091914, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:51,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-11-11T12:43:51,971 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 136 completed 2024-11-11T12:43:51,972 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:51,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees 2024-11-11T12:43:51,974 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:51,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-11T12:43:51,974 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=138, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:51,974 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=138, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:52,069 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=288 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/318a0df74846429fadae6f2e2fb9a5a2 2024-11-11T12:43:52,073 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/cdc012975751491f8f7ae61d7144f7e6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6 2024-11-11T12:43:52,075 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-11T12:43:52,076 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6, entries=150, sequenceid=288, filesize=12.0 K 2024-11-11T12:43:52,077 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/8833bb137bdf4b699bd60fe780f583bc as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc 2024-11-11T12:43:52,080 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc, entries=100, sequenceid=288, filesize=9.6 K 2024-11-11T12:43:52,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/318a0df74846429fadae6f2e2fb9a5a2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2 2024-11-11T12:43:52,084 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2, entries=100, sequenceid=288, filesize=9.6 K 2024-11-11T12:43:52,088 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for bb21a7c6e49c779e06f46670f1405ab7 in 557ms, sequenceid=288, compaction requested=true 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:52,088 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:52,088 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:52,088 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:52,088 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:52,089 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 44612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:52,089 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:52,089 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:52,090 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:52,090 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:52,090 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:52,090 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/befe2f39033c4203a9ae1142258cde78, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=43.6 K 2024-11-11T12:43:52,090 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74e0f9e0362247d7acb0841450d6d1c1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=48.3 K 2024-11-11T12:43:52,090 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74e0f9e0362247d7acb0841450d6d1c1, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445 2024-11-11T12:43:52,090 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting befe2f39033c4203a9ae1142258cde78, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445 2024-11-11T12:43:52,091 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 8b770bcad69b4c8893b234696f58c5e5, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731329029585 2024-11-11T12:43:52,091 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a7a049f896bd44faa68c8f6bf0068e46, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731329029585 2024-11-11T12:43:52,092 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 629c00a3acc44196bc340eb515b0ced9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731329029746 2024-11-11T12:43:52,092 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
65499c2c9796448bbddf8930221327bb, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731329029746 2024-11-11T12:43:52,092 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8833bb137bdf4b699bd60fe780f583bc, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329030390 2024-11-11T12:43:52,092 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting cdc012975751491f8f7ae61d7144f7e6, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329030390 2024-11-11T12:43:52,100 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#450 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:52,101 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/8d32d679d71a47e4a92f3d8d5a238c8b is 50, key is test_row_0/B:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:52,110 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#451 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:52,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3cfffa8f83d743fbb04ae68ec0fae325 is 50, key is test_row_0/A:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:52,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742359_1535 (size=12983) 2024-11-11T12:43:52,121 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/8d32d679d71a47e4a92f3d8d5a238c8b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8d32d679d71a47e4a92f3d8d5a238c8b 2024-11-11T12:43:52,126 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,127 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=139 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:52,127 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:52,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,139 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2aa58c31c8d344ffac7048ee4a3d9102 is 50, key is test_row_0/A:col10/1731329031602/Put/seqid=0 2024-11-11T12:43:52,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742360_1536 (size=12983) 2024-11-11T12:43:52,140 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into 8d32d679d71a47e4a92f3d8d5a238c8b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:52,140 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:52,141 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=12, startTime=1731329032088; duration=0sec 2024-11-11T12:43:52,141 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:52,141 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:52,141 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:43:52,143 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 44612 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:43:52,143 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:52,143 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:52,143 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7aacbea74a74432491958c71cdabf3f6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=43.6 K 2024-11-11T12:43:52,143 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 7aacbea74a74432491958c71cdabf3f6, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329028445 2024-11-11T12:43:52,144 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 32e973207ce24050a8e0e8c81ef447ed, keycount=100, bloomtype=ROW, size=9.5 K, encoding=NONE, compression=NONE, seqNum=248, earliestPutTs=1731329029585 2024-11-11T12:43:52,144 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d16ccf50c3794f09a9d939fdb43841f1, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, 
compression=NONE, seqNum=276, earliestPutTs=1731329029746 2024-11-11T12:43:52,144 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 318a0df74846429fadae6f2e2fb9a5a2, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329030390 2024-11-11T12:43:52,147 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3cfffa8f83d743fbb04ae68ec0fae325 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3cfffa8f83d743fbb04ae68ec0fae325 2024-11-11T12:43:52,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742361_1537 (size=12301) 2024-11-11T12:43:52,153 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2aa58c31c8d344ffac7048ee4a3d9102 2024-11-11T12:43:52,155 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#453 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:52,156 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/1e22284d06fe4b9e9e4d3a615c94a31b is 50, key is test_row_0/C:col10/1731329030390/Put/seqid=0 2024-11-11T12:43:52,160 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 3cfffa8f83d743fbb04ae68ec0fae325(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:52,160 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:52,160 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=12, startTime=1731329032088; duration=0sec 2024-11-11T12:43:52,160 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:52,160 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:52,166 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/bee2e074daa94086932d4845f13a441e is 50, key is test_row_0/B:col10/1731329031602/Put/seqid=0 2024-11-11T12:43:52,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742362_1538 (size=12983) 2024-11-11T12:43:52,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742363_1539 (size=12301) 2024-11-11T12:43:52,222 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:52,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:52,237 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329092230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,239 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329092236, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,243 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329092237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-11T12:43:52,338 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329092338, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,342 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329092341, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,345 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329092344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,540 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329092540, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329092544, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,547 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329092546, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-11T12:43:52,583 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/1e22284d06fe4b9e9e4d3a615c94a31b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e22284d06fe4b9e9e4d3a615c94a31b 2024-11-11T12:43:52,586 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/bee2e074daa94086932d4845f13a441e 2024-11-11T12:43:52,588 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into 1e22284d06fe4b9e9e4d3a615c94a31b(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:52,588 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:52,588 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=12, startTime=1731329032088; duration=0sec 2024-11-11T12:43:52,588 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:52,588 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:52,597 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/3c5f27eac1364c15b65f5b4032834e5f is 50, key is test_row_0/C:col10/1731329031602/Put/seqid=0 2024-11-11T12:43:52,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742364_1540 (size=12301) 2024-11-11T12:43:52,602 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/3c5f27eac1364c15b65f5b4032834e5f 2024-11-11T12:43:52,605 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/2aa58c31c8d344ffac7048ee4a3d9102 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102 2024-11-11T12:43:52,608 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102, entries=150, sequenceid=312, filesize=12.0 K 2024-11-11T12:43:52,609 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/bee2e074daa94086932d4845f13a441e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e 2024-11-11T12:43:52,612 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e, entries=150, sequenceid=312, filesize=12.0 K 2024-11-11T12:43:52,613 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/3c5f27eac1364c15b65f5b4032834e5f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f 2024-11-11T12:43:52,616 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f, entries=150, sequenceid=312, filesize=12.0 K 2024-11-11T12:43:52,617 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=60.38 KB/61830 for bb21a7c6e49c779e06f46670f1405ab7 in 490ms, sequenceid=312, compaction requested=false 2024-11-11T12:43:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:52,617 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-11T12:43:52,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=139 2024-11-11T12:43:52,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=138 2024-11-11T12:43:52,620 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 644 msec 2024-11-11T12:43:52,622 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=138, table=TestAcidGuarantees in 649 msec 2024-11-11T12:43:52,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:52,623 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:52,623 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:52,630 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3327f06c29634decb58f27682f41ec12 is 50, key is test_row_0/A:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:52,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742365_1541 (size=14741) 2024-11-11T12:43:52,636 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3327f06c29634decb58f27682f41ec12 2024-11-11T12:43:52,643 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d65d46c378934e008d5e8c13b676c234 is 50, key is test_row_0/B:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:52,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742366_1542 
(size=12301) 2024-11-11T12:43:52,696 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329092690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,701 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329092695, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329092797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,806 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329092802, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,848 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329092844, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,853 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329092849, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:52,854 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:52,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329092850, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,005 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329093002, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,011 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329093007, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,048 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d65d46c378934e008d5e8c13b676c234 2024-11-11T12:43:53,055 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/80b93f085a1e4275a7bccfdcba4b148c is 50, key is test_row_0/C:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:53,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742367_1543 (size=12301) 2024-11-11T12:43:53,066 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/80b93f085a1e4275a7bccfdcba4b148c 2024-11-11T12:43:53,076 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/3327f06c29634decb58f27682f41ec12 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12 2024-11-11T12:43:53,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=138 2024-11-11T12:43:53,081 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 138 completed 2024-11-11T12:43:53,083 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): 
Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees 2024-11-11T12:43:53,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-11T12:43:53,086 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:53,087 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=140, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:53,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:53,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12, entries=200, sequenceid=328, filesize=14.4 K 2024-11-11T12:43:53,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d65d46c378934e008d5e8c13b676c234 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234 2024-11-11T12:43:53,096 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234, entries=150, sequenceid=328, filesize=12.0 K 2024-11-11T12:43:53,096 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/80b93f085a1e4275a7bccfdcba4b148c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c 2024-11-11T12:43:53,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c, entries=150, sequenceid=328, filesize=12.0 K 2024-11-11T12:43:53,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for bb21a7c6e49c779e06f46670f1405ab7 in 487ms, sequenceid=328, compaction requested=true 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush 
status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:53,110 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:53,110 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:53,110 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:53,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40025 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:53,111 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:53,111 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:53,111 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3cfffa8f83d743fbb04ae68ec0fae325, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=39.1 K 2024-11-11T12:43:53,111 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:53,111 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:53,111 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:53,111 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8d32d679d71a47e4a92f3d8d5a238c8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.7 K 2024-11-11T12:43:53,112 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3cfffa8f83d743fbb04ae68ec0fae325, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329029746 2024-11-11T12:43:53,112 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8d32d679d71a47e4a92f3d8d5a238c8b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329029746 2024-11-11T12:43:53,112 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2aa58c31c8d344ffac7048ee4a3d9102, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731329031599 2024-11-11T12:43:53,112 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bee2e074daa94086932d4845f13a441e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731329031599 2024-11-11T12:43:53,112 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
compactions.Compactor(224): Compacting 3327f06c29634decb58f27682f41ec12, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032223 2024-11-11T12:43:53,113 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d65d46c378934e008d5e8c13b676c234, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032229 2024-11-11T12:43:53,122 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#459 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:53,122 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#460 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:53,123 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/0b042e16391f4254919f896febe0fd10 is 50, key is test_row_0/B:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:53,123 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/ed09f19183de4e20b73f08ae0bb12404 is 50, key is test_row_0/A:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:53,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742368_1544 (size=13085) 2024-11-11T12:43:53,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742369_1545 (size=13085) 2024-11-11T12:43:53,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-11T12:43:53,185 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/ed09f19183de4e20b73f08ae0bb12404 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/ed09f19183de4e20b73f08ae0bb12404 2024-11-11T12:43:53,192 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into ed09f19183de4e20b73f08ae0bb12404(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:53,192 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:53,192 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329033110; duration=0sec 2024-11-11T12:43:53,192 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:53,192 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:53,192 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:53,193 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37585 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:53,193 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:53,193 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:53,193 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e22284d06fe4b9e9e4d3a615c94a31b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.7 K 2024-11-11T12:43:53,194 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1e22284d06fe4b9e9e4d3a615c94a31b, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=288, earliestPutTs=1731329029746 2024-11-11T12:43:53,194 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3c5f27eac1364c15b65f5b4032834e5f, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1731329031599 2024-11-11T12:43:53,195 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 80b93f085a1e4275a7bccfdcba4b148c, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032229 2024-11-11T12:43:53,211 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] 
throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#461 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:53,212 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/ca332845dac44b0dbb4afae5fd8f4db5 is 50, key is test_row_0/C:col10/1731329032236/Put/seqid=0 2024-11-11T12:43:53,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742370_1546 (size=13085) 2024-11-11T12:43:53,240 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,240 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=141 2024-11-11T12:43:53,240 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:53,241 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:53,241 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,253 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/5417605e6f16439ca1ba8567ec7551e2 is 50, key is test_row_0/A:col10/1731329032684/Put/seqid=0 2024-11-11T12:43:53,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to 
blk_1073742371_1547 (size=12301) 2024-11-11T12:43:53,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:53,315 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:53,346 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,346 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329093344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,348 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329093344, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329093351, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,356 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 186 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329093354, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,365 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329093359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-11T12:43:53,451 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329093450, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 142 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329093456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,562 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/0b042e16391f4254919f896febe0fd10 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/0b042e16391f4254919f896febe0fd10 2024-11-11T12:43:53,566 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into 0b042e16391f4254919f896febe0fd10(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:53,566 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:53,566 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329033110; duration=0sec 2024-11-11T12:43:53,566 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:53,566 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:53,630 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/ca332845dac44b0dbb4afae5fd8f4db5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ca332845dac44b0dbb4afae5fd8f4db5 2024-11-11T12:43:53,634 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into ca332845dac44b0dbb4afae5fd8f4db5(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:53,634 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:53,634 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329033110; duration=0sec 2024-11-11T12:43:53,634 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:53,634 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:53,655 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329093654, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,667 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:53,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329093664, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:53,674 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/5417605e6f16439ca1ba8567ec7551e2 2024-11-11T12:43:53,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-11T12:43:53,702 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3061b99555da4520aea13b499662991e is 50, key is test_row_0/B:col10/1731329032684/Put/seqid=0 2024-11-11T12:43:53,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742372_1548 (size=12301) 2024-11-11T12:43:53,752 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=352 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3061b99555da4520aea13b499662991e 2024-11-11T12:43:53,786 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b478f89a90114ee085abc8941258bb0e is 50, key is test_row_0/C:col10/1731329032684/Put/seqid=0 2024-11-11T12:43:53,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742373_1549 (size=12301) 2024-11-11T12:43:53,813 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=352 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b478f89a90114ee085abc8941258bb0e 2024-11-11T12:43:53,820 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/5417605e6f16439ca1ba8567ec7551e2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2 2024-11-11T12:43:53,826 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2, entries=150, sequenceid=352, filesize=12.0 K 2024-11-11T12:43:53,832 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3061b99555da4520aea13b499662991e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e 2024-11-11T12:43:53,836 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e, entries=150, sequenceid=352, filesize=12.0 K 2024-11-11T12:43:53,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b478f89a90114ee085abc8941258bb0e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e 2024-11-11T12:43:53,842 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e, entries=150, sequenceid=352, filesize=12.0 K 2024-11-11T12:43:53,843 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for bb21a7c6e49c779e06f46670f1405ab7 in 602ms, sequenceid=352, compaction requested=false 2024-11-11T12:43:53,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.HRegion(2538): Flush status journal for 
bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:53,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:53,843 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-11T12:43:53,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=141 2024-11-11T12:43:53,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=141, resume processing ppid=140 2024-11-11T12:43:53,845 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, ppid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 757 msec 2024-11-11T12:43:53,847 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=140, table=TestAcidGuarantees in 763 msec 2024-11-11T12:43:53,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:53,967 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:43:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:53,967 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:53,968 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:53,990 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d0c5f90fd3c7454dad60e1a38ee1ef66 is 50, key is test_row_0/A:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:54,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742374_1550 (size=14741) 2024-11-11T12:43:54,120 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329094116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,126 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329094120, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,198 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=140 2024-11-11T12:43:54,199 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 140 completed 2024-11-11T12:43:54,201 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:54,212 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees 2024-11-11T12:43:54,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:54,217 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:54,218 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=142, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:54,218 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=143, ppid=142, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:54,233 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329094225, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,235 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329094234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:54,363 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,363 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329094359, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329094357, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,371 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,371 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-11T12:43:54,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,371 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:54,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,372 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,377 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329094375, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d0c5f90fd3c7454dad60e1a38ee1ef66 2024-11-11T12:43:54,439 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329094436, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,444 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329094440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3ce471691d294ff182d436b234334446 is 50, key is test_row_0/B:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:54,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742375_1551 (size=12301) 2024-11-11T12:43:54,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3ce471691d294ff182d436b234334446 2024-11-11T12:43:54,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:54,530 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d56d7848e925424bba0c6d2f1ba56104 is 50, key is test_row_0/C:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:54,532 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,534 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-11T12:43:54,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:54,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:54,536 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,536 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,544 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742376_1552 (size=12301) 2024-11-11T12:43:54,704 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,712 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-11T12:43:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,716 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,716 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,750 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329094744, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,758 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:54,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 140 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329094754, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:54,880 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:54,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-11T12:43:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:54,881 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:54,882 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] handler.RSProcedureHandler(58): pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:54,882 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=143 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=143 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:54,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=368 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d56d7848e925424bba0c6d2f1ba56104 2024-11-11T12:43:54,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d0c5f90fd3c7454dad60e1a38ee1ef66 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66 2024-11-11T12:43:54,998 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66, entries=200, sequenceid=368, filesize=14.4 K 2024-11-11T12:43:54,999 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/3ce471691d294ff182d436b234334446 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446 2024-11-11T12:43:55,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446, entries=150, sequenceid=368, filesize=12.0 K 2024-11-11T12:43:55,003 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/d56d7848e925424bba0c6d2f1ba56104 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104 2024-11-11T12:43:55,007 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104, entries=150, sequenceid=368, filesize=12.0 K 2024-11-11T12:43:55,008 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=127.47 KB/130530 for bb21a7c6e49c779e06f46670f1405ab7 in 1041ms, sequenceid=368, compaction requested=true 2024-11-11T12:43:55,008 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:55,008 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction 
from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:55,009 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:55,009 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40127 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:55,009 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:55,009 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:55,009 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:55,010 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/ed09f19183de4e20b73f08ae0bb12404, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=39.2 K 2024-11-11T12:43:55,010 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:55,010 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ed09f19183de4e20b73f08ae0bb12404, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032229 2024-11-11T12:43:55,010 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:55,010 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:55,010 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/0b042e16391f4254919f896febe0fd10, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.8 K 2024-11-11T12:43:55,011 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5417605e6f16439ca1ba8567ec7551e2, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1731329032684 2024-11-11T12:43:55,011 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b042e16391f4254919f896febe0fd10, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032229 2024-11-11T12:43:55,011 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0c5f90fd3c7454dad60e1a38ee1ef66, keycount=200, bloomtype=ROW, size=14.4 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:55,011 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3061b99555da4520aea13b499662991e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1731329032684 2024-11-11T12:43:55,012 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3ce471691d294ff182d436b234334446, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:55,026 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#468 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:55,026 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/74186ff35993438fbfc50bd7faecd8b2 is 50, key is test_row_0/A:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:55,031 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#469 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:55,032 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d72e1533d8524e8789764af01debd3a6 is 50, key is test_row_0/B:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:55,036 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,036 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=143 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:55,037 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:55,037 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/674b63a88c1249eb8c82473ebe565ecf is 50, key is test_row_0/A:col10/1731329034013/Put/seqid=0 2024-11-11T12:43:55,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742377_1553 (size=13187) 2024-11-11T12:43:55,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742378_1554 (size=13187) 2024-11-11T12:43:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742379_1555 
(size=12301) 2024-11-11T12:43:55,109 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/674b63a88c1249eb8c82473ebe565ecf 2024-11-11T12:43:55,111 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d72e1533d8524e8789764af01debd3a6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d72e1533d8524e8789764af01debd3a6 2024-11-11T12:43:55,118 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into d72e1533d8524e8789764af01debd3a6(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:43:55,118 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:55,118 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329035009; duration=0sec 2024-11-11T12:43:55,118 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:55,118 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:55,118 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:55,119 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:55,119 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:55,119 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:55,119 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ca332845dac44b0dbb4afae5fd8f4db5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.8 K 2024-11-11T12:43:55,119 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ca332845dac44b0dbb4afae5fd8f4db5, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329032229 2024-11-11T12:43:55,120 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b478f89a90114ee085abc8941258bb0e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=352, earliestPutTs=1731329032684 2024-11-11T12:43:55,120 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d56d7848e925424bba0c6d2f1ba56104, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:55,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b5ddd68b66ca4f54989b9146906ee703 is 50, key is test_row_0/B:col10/1731329034013/Put/seqid=0 2024-11-11T12:43:55,143 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#472 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:55,143 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7dca382c5b8746e180dd87d2b8170e59 is 50, key is test_row_0/C:col10/1731329033328/Put/seqid=0 2024-11-11T12:43:55,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742380_1556 (size=12301) 2024-11-11T12:43:55,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742381_1557 (size=13187) 2024-11-11T12:43:55,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:55,263 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
as already flushing 2024-11-11T12:43:55,300 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,300 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329095291, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329095293, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,319 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:55,406 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329095402, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329095404, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,543 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/74186ff35993438fbfc50bd7faecd8b2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74186ff35993438fbfc50bd7faecd8b2 2024-11-11T12:43:55,565 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into 74186ff35993438fbfc50bd7faecd8b2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
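The "Committing .../.tmp/A/74186ff35993438fbfc50bd7faecd8b2 as .../A/74186ff35993438fbfc50bd7faecd8b2" record above (and the matching .tmp/B commit earlier) shows flush and compaction output being written into a temporary directory first and only then moved into the live store directory. The sketch below illustrates that write-to-temp-then-rename pattern generically on a local filesystem; it is not HBase's HRegionFileSystem code, and the directory layout and file name are borrowed from the log purely as an example.

// Generic sketch of the ".tmp then commit" pattern suggested by the
// "Committing .../.tmp/A/<file> as .../A/<file>" records above. Local-filesystem
// illustration only; HBase performs the equivalent rename against HDFS.
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpCommitSketch {

    // Write the payload under storeDir/.tmp, then publish it into storeDir in one rename,
    // so readers see either the old file set or the complete new file, never a partial write.
    static Path writeAndCommit(Path storeDir, String fileName, byte[] payload) throws IOException {
        Path tmpDir = storeDir.resolve(".tmp");
        Files.createDirectories(tmpDir);
        Path tmpFile = tmpDir.resolve(fileName);
        Files.write(tmpFile, payload);                       // the slow write happens off to the side
        return Files.move(tmpFile, storeDir.resolve(fileName), StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createTempDirectory("store-A");
        Path committed = writeAndCommit(storeDir, "74186ff35993438fbfc50bd7faecd8b2",
            "compacted cells".getBytes(StandardCharsets.UTF_8));
        System.out.println("committed " + committed);
    }
}

The same two-step commit appears for every flush and compaction in this log; the point of the pattern is that an interrupted write leaves at worst an orphaned temporary file rather than a truncated store file.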
2024-11-11T12:43:55,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:55,565 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329035008; duration=0sec 2024-11-11T12:43:55,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:55,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:55,571 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b5ddd68b66ca4f54989b9146906ee703 2024-11-11T12:43:55,595 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/53ad33c24487407199a3ccb4d574b258 is 50, key is test_row_0/C:col10/1731329034013/Put/seqid=0 2024-11-11T12:43:55,600 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/7dca382c5b8746e180dd87d2b8170e59 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7dca382c5b8746e180dd87d2b8170e59 2024-11-11T12:43:55,611 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329095608, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,623 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:55,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329095620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:55,624 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into 7dca382c5b8746e180dd87d2b8170e59(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
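The recurring "Region is too busy ... Over memstore limit=512.0 K" warnings above are the region server rejecting further writes while flushes and compactions drain the memstore; the paired CallRunner DEBUG lines show the same exception being returned to the callers, which the HBase client then retries. The snippet below is only a hedged sketch of how an application could layer its own bounded backoff around Table.put on top of the client's built-in retries; the attempt limit, sleep schedule, and connection setup are assumptions for illustration, not values taken from this test.

// Hedged sketch: extra client-side backoff when a put ultimately fails because the
// region reports RegionTooBusyException, as in the warnings above. The HBase client
// already retries internally; the attempt limit and sleep schedule here are assumptions.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutSketch {

    // The busy signal may arrive wrapped by the client's retry machinery, so walk the cause chain.
    static boolean causedByRegionTooBusy(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof RegionTooBusyException) {
                return true;
            }
        }
        return false;
    }

    static void putWithBackoff(Table table, Put put) throws IOException, InterruptedException {
        int maxAttempts = 5;          // assumption, not an HBase default
        long sleepMs = 100;
        for (int attempt = 1; ; attempt++) {
            try {
                table.put(put);
                return;
            } catch (IOException e) {
                if (!causedByRegionTooBusy(e) || attempt >= maxAttempts) {
                    throw e;          // not memstore pressure, or we have waited long enough
                }
                Thread.sleep(sleepMs);
                sleepMs = Math.min(sleepMs * 2, 5_000);   // simple exponential backoff
            }
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("v"));
            putWithBackoff(table, put);
        }
    }
}

Backing off briefly is usually enough in a situation like the one logged here, since the pressure clears once the in-flight flush completes.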
2024-11-11T12:43:55,624 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:55,624 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329035009; duration=0sec 2024-11-11T12:43:55,624 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:55,624 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:55,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742382_1558 (size=12301) 2024-11-11T12:43:55,627 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=390 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/53ad33c24487407199a3ccb4d574b258 2024-11-11T12:43:55,631 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/674b63a88c1249eb8c82473ebe565ecf as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf 2024-11-11T12:43:55,635 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:43:55,636 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/b5ddd68b66ca4f54989b9146906ee703 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703 2024-11-11T12:43:55,639 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:43:55,639 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/53ad33c24487407199a3ccb4d574b258 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258 2024-11-11T12:43:55,644 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258, entries=150, sequenceid=390, filesize=12.0 K 2024-11-11T12:43:55,645 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for bb21a7c6e49c779e06f46670f1405ab7 in 608ms, sequenceid=390, compaction requested=false 2024-11-11T12:43:55,645 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:55,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:55,646 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-11-11T12:43:55,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-11-11T12:43:55,650 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=143, resume processing ppid=142 2024-11-11T12:43:55,650 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.4300 sec 2024-11-11T12:43:55,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=142, table=TestAcidGuarantees in 1.4490 sec 2024-11-11T12:43:55,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:55,918 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:55,919 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:55,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6f2bc9cc46f24853bf6368fe39baa0b5 is 50, key is test_row_0/A:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:55,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742383_1559 (size=12301) 2024-11-11T12:43:55,958 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6f2bc9cc46f24853bf6368fe39baa0b5 2024-11-11T12:43:56,000 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 163 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329095992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,003 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,003 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329095994, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,007 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/99b8d76d001347f294174d42936d65ba is 50, key is test_row_0/B:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:56,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742384_1560 (size=12301) 2024-11-11T12:43:56,053 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/99b8d76d001347f294174d42936d65ba 2024-11-11T12:43:56,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/adc4e7d7b3bd4dd4aae1d1d475eac98b is 50, key is test_row_0/C:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:56,105 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 165 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329096102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329096104, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742385_1561 (size=12301) 2024-11-11T12:43:56,310 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 167 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329096307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,318 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329096312, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,320 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=142 2024-11-11T12:43:56,320 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 142 completed 2024-11-11T12:43:56,321 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:43:56,323 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees 2024-11-11T12:43:56,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:56,324 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:43:56,324 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=144, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:43:56,324 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:43:56,373 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,373 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43288 deadline: 1731329096368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,374 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43328 deadline: 1731329096368, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,374 DEBUG [Thread-2108 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4138 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:56,375 DEBUG [Thread-2114 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4144 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 
K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:56,398 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43370 deadline: 1731329096392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,398 DEBUG [Thread-2116 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4161 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_2' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., hostname=32e78532c8b1,44673,1731328897232, seqNum=2, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:43:56,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:56,480 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,480 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:56,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 
{event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:56,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,482 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,482 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:56,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:56,525 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=408 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/adc4e7d7b3bd4dd4aae1d1d475eac98b 2024-11-11T12:43:56,529 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/6f2bc9cc46f24853bf6368fe39baa0b5 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5 2024-11-11T12:43:56,568 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5, entries=150, sequenceid=408, filesize=12.0 K 2024-11-11T12:43:56,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/99b8d76d001347f294174d42936d65ba as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba 2024-11-11T12:43:56,579 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba, entries=150, sequenceid=408, filesize=12.0 K 2024-11-11T12:43:56,580 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/adc4e7d7b3bd4dd4aae1d1d475eac98b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b 2024-11-11T12:43:56,592 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b, entries=150, sequenceid=408, filesize=12.0 K 2024-11-11T12:43:56,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=120.76 KB/123660 for bb21a7c6e49c779e06f46670f1405ab7 in 675ms, sequenceid=408, compaction requested=true 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store bb21a7c6e49c779e06f46670f1405ab7:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:43:56,593 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:43:56,593 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:56,594 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:56,594 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/A is initiating minor compaction (all files) 2024-11-11T12:43:56,594 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/A in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:56,594 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74186ff35993438fbfc50bd7faecd8b2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.9 K 2024-11-11T12:43:56,595 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 74186ff35993438fbfc50bd7faecd8b2, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:56,596 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:56,597 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 674b63a88c1249eb8c82473ebe565ecf, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731329034013 2024-11-11T12:43:56,599 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 6f2bc9cc46f24853bf6368fe39baa0b5, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1731329035280 2024-11-11T12:43:56,601 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:56,601 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/B is initiating minor compaction (all files) 2024-11-11T12:43:56,601 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/B in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:56,601 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d72e1533d8524e8789764af01debd3a6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.9 K 2024-11-11T12:43:56,603 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d72e1533d8524e8789764af01debd3a6, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:56,604 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b5ddd68b66ca4f54989b9146906ee703, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731329034013 2024-11-11T12:43:56,605 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 99b8d76d001347f294174d42936d65ba, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1731329035280 2024-11-11T12:43:56,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:56,618 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:43:56,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:56,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:56,618 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:56,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:56,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:56,619 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:56,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:56,630 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#A#compaction#477 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:56,630 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d315f6b45e1b4e609358c4913097a4da is 50, key is test_row_0/A:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:56,630 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#B#compaction#478 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:56,631 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d16e3ba67aa8412e9a0193d756d69706 is 50, key is test_row_0/B:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:56,636 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,636 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:56,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:56,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,637 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:56,637 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,637 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/055e34bfc0f34c5c8f898dfe734e33eb is 50, key is test_row_0/A:col10/1731329036615/Put/seqid=0 2024-11-11T12:43:56,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742386_1562 (size=13289) 2024-11-11T12:43:56,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742387_1563 (size=13289) 2024-11-11T12:43:56,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742388_1564 (size=14741) 2024-11-11T12:43:56,656 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/d315f6b45e1b4e609358c4913097a4da as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d315f6b45e1b4e609358c4913097a4da 2024-11-11T12:43:56,659 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/d16e3ba67aa8412e9a0193d756d69706 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d16e3ba67aa8412e9a0193d756d69706 2024-11-11T12:43:56,669 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/A of bb21a7c6e49c779e06f46670f1405ab7 into d315f6b45e1b4e609358c4913097a4da(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:56,669 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:56,669 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/A, priority=13, startTime=1731329036593; duration=0sec 2024-11-11T12:43:56,669 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:43:56,669 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:A 2024-11-11T12:43:56,669 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:43:56,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329096669, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,674 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329096670, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,677 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37789 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:43:56,677 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): bb21a7c6e49c779e06f46670f1405ab7/C is initiating minor compaction (all files) 2024-11-11T12:43:56,677 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of bb21a7c6e49c779e06f46670f1405ab7/C in TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,678 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7dca382c5b8746e180dd87d2b8170e59, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp, totalSize=36.9 K 2024-11-11T12:43:56,678 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/B of bb21a7c6e49c779e06f46670f1405ab7 into d16e3ba67aa8412e9a0193d756d69706(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:56,678 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:56,678 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/B, priority=13, startTime=1731329036593; duration=0sec 2024-11-11T12:43:56,678 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:56,678 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:B 2024-11-11T12:43:56,679 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7dca382c5b8746e180dd87d2b8170e59, keycount=150, bloomtype=ROW, size=12.9 K, encoding=NONE, compression=NONE, seqNum=368, earliestPutTs=1731329033328 2024-11-11T12:43:56,680 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 53ad33c24487407199a3ccb4d574b258, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=390, earliestPutTs=1731329034013 2024-11-11T12:43:56,682 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting adc4e7d7b3bd4dd4aae1d1d475eac98b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=408, earliestPutTs=1731329035280 2024-11-11T12:43:56,687 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): bb21a7c6e49c779e06f46670f1405ab7#C#compaction#480 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:43:56,688 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/edf8eb4cd2cc4dcc871b7d4cae387700 is 50, key is test_row_0/C:col10/1731329035916/Put/seqid=0 2024-11-11T12:43:56,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742389_1565 (size=13289) 2024-11-11T12:43:56,719 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/edf8eb4cd2cc4dcc871b7d4cae387700 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/edf8eb4cd2cc4dcc871b7d4cae387700 2024-11-11T12:43:56,752 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in bb21a7c6e49c779e06f46670f1405ab7/C of bb21a7c6e49c779e06f46670f1405ab7 into edf8eb4cd2cc4dcc871b7d4cae387700(size=13.0 K), total size for store is 13.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:43:56,752 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:56,752 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7., storeName=bb21a7c6e49c779e06f46670f1405ab7/C, priority=13, startTime=1731329036593; duration=0sec 2024-11-11T12:43:56,752 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:43:56,752 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: bb21a7c6e49c779e06f46670f1405ab7:C 2024-11-11T12:43:56,776 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329096775, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,780 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 196 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329096780, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,792 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,795 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,795 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:56,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:56,954 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,960 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:56,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:56,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:56,968 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,968 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:56,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329096979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:56,986 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:56,986 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 198 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329096982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,052 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/055e34bfc0f34c5c8f898dfe734e33eb 2024-11-11T12:43:57,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/ac55bc4163594ef5b3a539a15a3a3ea7 is 50, key is test_row_0/B:col10/1731329036615/Put/seqid=0 2024-11-11T12:43:57,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742390_1566 (size=12301) 2024-11-11T12:43:57,125 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:57,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,125 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,277 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,278 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,278 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,279 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:57,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 182 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329097288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,293 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:57,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 200 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329097290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,365 DEBUG [Thread-2119 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11193a0c to 127.0.0.1:54294 2024-11-11T12:43:57,365 DEBUG [Thread-2119 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:57,367 DEBUG [Thread-2127 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x054c2725 to 127.0.0.1:54294 2024-11-11T12:43:57,368 DEBUG [Thread-2127 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:57,370 DEBUG [Thread-2123 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x154f0f85 to 127.0.0.1:54294 2024-11-11T12:43:57,370 DEBUG [Thread-2123 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:57,372 DEBUG [Thread-2121 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7861b162 to 127.0.0.1:54294 2024-11-11T12:43:57,372 DEBUG [Thread-2121 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:57,372 DEBUG [Thread-2125 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x008a917b to 127.0.0.1:54294 2024-11-11T12:43:57,372 DEBUG [Thread-2125 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:57,433 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,433 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:57,433 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
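Note on the repeated "Over memstore limit=512.0 K" warnings: HRegion.checkResources blocks writes once the per-region memstore exceeds the flush threshold multiplied by hbase.hregion.memstore.block.multiplier. A minimal sketch of how that blocking limit is derived from configuration follows; the flush size and multiplier are hypothetical test-sized values chosen so the product matches the 512 K reported in this log, not values read from this run's configuration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimit {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hypothetical test-sized settings; the log only shows the resulting limit (512.0 K).
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024); // per-region flush threshold
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);     // blocking multiplier

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier; // above this, puts fail with RegionTooBusyException
    System.out.println("Blocking memstore limit = " + blockingLimit + " bytes"); // 524288 = 512.0 K
  }
}
```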
2024-11-11T12:43:57,433 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,434 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,440 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:57,491 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/ac55bc4163594ef5b3a539a15a3a3ea7 2024-11-11T12:43:57,497 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b7ee0fd5e92446f08b4f466d80c7f07c is 50, key is test_row_0/C:col10/1731329036615/Put/seqid=0 2024-11-11T12:43:57,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742391_1567 (size=12301) 2024-11-11T12:43:57,585 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,586 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
as already flushing 2024-11-11T12:43:57,586 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,586 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,587 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,739 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,739 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,739 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,740 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,740 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,797 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:57,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 184 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43316 deadline: 1731329097797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,798 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:43:57,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 202 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:43344 deadline: 1731329097798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:57,892 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=145 2024-11-11T12:43:57,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:57,892 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:57,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:57,893 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] handler.RSProcedureHandler(58): pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:43:57,893 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=145 java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:43:57,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=145 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
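Note on the pid=144/145 sequence above: the master's flush procedure keeps re-dispatching FlushRegionCallable to the region server, each attempt fails with "Unable to complete flush" because the region is already flushing, and the master simply retries until the in-progress flush completes (next entry). Assuming pid=144 was driven by a client flush request, a minimal sketch of issuing such a request through the Admin API is shown below; the recurring "Checking to see if procedure is done pid=144" entries are consistent with the client polling for that procedure's completion.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Asks the master to run a flush procedure, which dispatches FlushRegionCallable
      // to the region servers hosting the table's regions and retries on failure.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```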
2024-11-11T12:43:57,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.49 KB at sequenceid=430 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b7ee0fd5e92446f08b4f466d80c7f07c 2024-11-11T12:43:57,904 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/055e34bfc0f34c5c8f898dfe734e33eb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/055e34bfc0f34c5c8f898dfe734e33eb 2024-11-11T12:43:57,907 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/055e34bfc0f34c5c8f898dfe734e33eb, entries=200, sequenceid=430, filesize=14.4 K 2024-11-11T12:43:57,908 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/ac55bc4163594ef5b3a539a15a3a3ea7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/ac55bc4163594ef5b3a539a15a3a3ea7 2024-11-11T12:43:57,911 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/ac55bc4163594ef5b3a539a15a3a3ea7, entries=150, sequenceid=430, filesize=12.0 K 2024-11-11T12:43:57,911 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/b7ee0fd5e92446f08b4f466d80c7f07c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b7ee0fd5e92446f08b4f466d80c7f07c 2024-11-11T12:43:57,914 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b7ee0fd5e92446f08b4f466d80c7f07c, entries=150, sequenceid=430, filesize=12.0 K 2024-11-11T12:43:57,915 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~127.47 KB/130530, heapSize ~334.69 KB/342720, currentSize=73.80 KB/75570 for bb21a7c6e49c779e06f46670f1405ab7 in 1297ms, sequenceid=430, compaction requested=false 2024-11-11T12:43:57,915 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:58,045 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:43:58,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, 
pid=145 2024-11-11T12:43:58,045 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:43:58,045 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=73.80 KB heapSize=194.11 KB 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:43:58,046 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:43:58,050 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/1dff8a4f402c4b588479317d9d803d5f is 50, key is test_row_0/A:col10/1731329036657/Put/seqid=0 2024-11-11T12:43:58,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742392_1568 (size=12301) 2024-11-11T12:43:58,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-11-11T12:43:58,455 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/1dff8a4f402c4b588479317d9d803d5f 2024-11-11T12:43:58,461 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/5e551b513faa44b7aea361d53f8f1a6f is 50, key is test_row_0/B:col10/1731329036657/Put/seqid=0 2024-11-11T12:43:58,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is 
added to blk_1073742393_1569 (size=12301) 2024-11-11T12:43:58,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:43:58,808 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. as already flushing 2024-11-11T12:43:58,808 DEBUG [Thread-2112 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x032bb71c to 127.0.0.1:54294 2024-11-11T12:43:58,808 DEBUG [Thread-2112 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:58,809 DEBUG [Thread-2110 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5ef40578 to 127.0.0.1:54294 2024-11-11T12:43:58,809 DEBUG [Thread-2110 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:43:58,865 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/5e551b513faa44b7aea361d53f8f1a6f 2024-11-11T12:43:58,871 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eff062647d6e47e3ac4d3e4866b5fb8f is 50, key is test_row_0/C:col10/1731329036657/Put/seqid=0 2024-11-11T12:43:58,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742394_1570 (size=12301) 2024-11-11T12:43:59,276 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.60 KB at sequenceid=447 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eff062647d6e47e3ac4d3e4866b5fb8f 2024-11-11T12:43:59,280 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/1dff8a4f402c4b588479317d9d803d5f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/1dff8a4f402c4b588479317d9d803d5f 2024-11-11T12:43:59,283 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/1dff8a4f402c4b588479317d9d803d5f, entries=150, sequenceid=447, filesize=12.0 K 2024-11-11T12:43:59,284 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/5e551b513faa44b7aea361d53f8f1a6f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/5e551b513faa44b7aea361d53f8f1a6f 2024-11-11T12:43:59,288 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/5e551b513faa44b7aea361d53f8f1a6f, entries=150, sequenceid=447, filesize=12.0 K 2024-11-11T12:43:59,288 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/eff062647d6e47e3ac4d3e4866b5fb8f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eff062647d6e47e3ac4d3e4866b5fb8f 2024-11-11T12:43:59,291 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eff062647d6e47e3ac4d3e4866b5fb8f, entries=150, sequenceid=447, filesize=12.0 K 2024-11-11T12:43:59,292 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~73.80 KB/75570, heapSize ~194.06 KB/198720, currentSize=13.42 KB/13740 for bb21a7c6e49c779e06f46670f1405ab7 in 1247ms, sequenceid=447, compaction requested=true 2024-11-11T12:43:59,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:43:59,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:43:59,292 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145
2024-11-11T12:43:59,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=145
2024-11-11T12:43:59,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144
2024-11-11T12:43:59,294 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.9690 sec
2024-11-11T12:43:59,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=144, table=TestAcidGuarantees in 2.9740 sec
2024-11-11T12:44:00,383 DEBUG [Thread-2108 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x32c12a30 to 127.0.0.1:54294
2024-11-11T12:44:00,383 DEBUG [Thread-2108 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:44:00,397 DEBUG [Thread-2114 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x06bc0f7c to 127.0.0.1:54294
2024-11-11T12:44:00,397 DEBUG [Thread-2114 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:44:00,431 DEBUG [Thread-2116 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1b8b6e04 to 127.0.0.1:54294
2024-11-11T12:44:00,431 DEBUG [Thread-2116 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:44:00,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144
2024-11-11T12:44:00,442 INFO [Thread-2118 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 144 completed
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers:
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 75
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 83
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 77
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 69
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 51
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers:
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners:
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1756
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5268 rows
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1751
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5253 rows
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1756
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5268 rows
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1769
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5307 rows
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(402): scanned 1754
2024-11-11T12:44:00,442 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(403): verified 5262 rows
2024-11-11T12:44:00,442 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService
2024-11-11T12:44:00,442 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x022a6e9f to 127.0.0.1:54294
2024-11-11T12:44:00,443 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-11T12:44:00,444 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees
2024-11-11T12:44:00,445 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees
2024-11-11T12:44:00,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=146, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees
2024-11-11T12:44:00,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146
2024-11-11T12:44:00,451 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329040451"}]},"ts":"1731329040451"}
2024-11-11T12:44:00,453 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta
2024-11-11T12:44:00,455 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING
2024-11-11T12:44:00,456 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=147, ppid=146, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}]
2024-11-11T12:44:00,457 INFO [PEWorker-1 {}]
procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, UNASSIGN}] 2024-11-11T12:44:00,457 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, UNASSIGN 2024-11-11T12:44:00,458 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=bb21a7c6e49c779e06f46670f1405ab7, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:00,459 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:44:00,459 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE; CloseRegionProcedure bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:44:00,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-11T12:44:00,610 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:00,611 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(124): Close bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:44:00,611 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:44:00,611 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1681): Closing bb21a7c6e49c779e06f46670f1405ab7, disabling compactions & flushes 2024-11-11T12:44:00,611 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:44:00,611 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:44:00,611 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. after waiting 0 ms 2024-11-11T12:44:00,611 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 
2024-11-11T12:44:00,611 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(2837): Flushing bb21a7c6e49c779e06f46670f1405ab7 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=A 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=B 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactingMemStore(205): FLUSHING TO DISK bb21a7c6e49c779e06f46670f1405ab7, store=C 2024-11-11T12:44:00,612 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:00,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/de00b4ab247a4961b0ffff3df4b64c70 is 50, key is test_row_0/A:col10/1731329038808/Put/seqid=0 2024-11-11T12:44:00,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742395_1571 (size=12301) 2024-11-11T12:44:00,621 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/de00b4ab247a4961b0ffff3df4b64c70 2024-11-11T12:44:00,627 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/86de876264324b01becd4519d4765add is 50, key is test_row_0/B:col10/1731329038808/Put/seqid=0 2024-11-11T12:44:00,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742396_1572 (size=12301) 2024-11-11T12:44:00,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-11T12:44:01,041 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/86de876264324b01becd4519d4765add 2024-11-11T12:44:01,048 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/811cef7e7930471abaae34e5be3acdeb is 50, key is test_row_0/C:col10/1731329038808/Put/seqid=0 2024-11-11T12:44:01,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-11T12:44:01,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742397_1573 (size=12301) 2024-11-11T12:44:01,477 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=455 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/811cef7e7930471abaae34e5be3acdeb 2024-11-11T12:44:01,482 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/A/de00b4ab247a4961b0ffff3df4b64c70 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/de00b4ab247a4961b0ffff3df4b64c70 2024-11-11T12:44:01,485 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/de00b4ab247a4961b0ffff3df4b64c70, entries=150, sequenceid=455, filesize=12.0 K 2024-11-11T12:44:01,486 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/B/86de876264324b01becd4519d4765add as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/86de876264324b01becd4519d4765add 2024-11-11T12:44:01,489 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/86de876264324b01becd4519d4765add, entries=150, sequenceid=455, filesize=12.0 K 2024-11-11T12:44:01,490 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/.tmp/C/811cef7e7930471abaae34e5be3acdeb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/811cef7e7930471abaae34e5be3acdeb 2024-11-11T12:44:01,493 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/811cef7e7930471abaae34e5be3acdeb, entries=150, sequenceid=455, filesize=12.0 K 2024-11-11T12:44:01,494 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for bb21a7c6e49c779e06f46670f1405ab7 in 883ms, sequenceid=455, compaction requested=true 2024-11-11T12:44:01,495 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3fbdd66064564903ba371ce4434f864d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3f8a53777c114e939778c04548312574, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3160c30e79144c0abfe685bf0b28de38, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/104be03a28ee4f9a87505c32174e8dad, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74e0f9e0362247d7acb0841450d6d1c1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3cfffa8f83d743fbb04ae68ec0fae325, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/ed09f19183de4e20b73f08ae0bb12404, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74186ff35993438fbfc50bd7faecd8b2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5] to archive 2024-11-11T12:44:01,496 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T12:44:01,497 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d8a00173f9574c8f9e132ded7dfc9479 2024-11-11T12:44:01,512 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8fe96b715b7b4c84a36b5aadc45c0ed6 2024-11-11T12:44:01,514 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/48a6efacb62642f0b01421ba9638563a 2024-11-11T12:44:01,515 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3fbdd66064564903ba371ce4434f864d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3fbdd66064564903ba371ce4434f864d 2024-11-11T12:44:01,516 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/17d8120645be419593766fd7590ee8f5 2024-11-11T12:44:01,518 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3f8a53777c114e939778c04548312574 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3f8a53777c114e939778c04548312574 2024-11-11T12:44:01,519 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/eb45d03598c74bf1912847c6708361b6 2024-11-11T12:44:01,520 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/fddd528aa5884936bee502e348ba64a8 2024-11-11T12:44:01,521 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3160c30e79144c0abfe685bf0b28de38 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3160c30e79144c0abfe685bf0b28de38 2024-11-11T12:44:01,522 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/44fcc0e125a94195a557bbac604e7cc5 2024-11-11T12:44:01,523 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2d128d3548b84f4089afbef04bae09b3 2024-11-11T12:44:01,525 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/b090613e08784b8198a1a53211a3ab35 2024-11-11T12:44:01,526 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6d58241da9eb42d0b0974011e3f4722a 2024-11-11T12:44:01,527 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/104be03a28ee4f9a87505c32174e8dad to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/104be03a28ee4f9a87505c32174e8dad 2024-11-11T12:44:01,529 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/bed4079f101549b3a4b661ec74385a74 2024-11-11T12:44:01,530 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74e0f9e0362247d7acb0841450d6d1c1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74e0f9e0362247d7acb0841450d6d1c1 2024-11-11T12:44:01,531 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/721e278b88c74e298169a23ba2f7fc55 2024-11-11T12:44:01,533 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/8b770bcad69b4c8893b234696f58c5e5 2024-11-11T12:44:01,536 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/65499c2c9796448bbddf8930221327bb 2024-11-11T12:44:01,537 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3cfffa8f83d743fbb04ae68ec0fae325 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3cfffa8f83d743fbb04ae68ec0fae325 2024-11-11T12:44:01,538 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/cdc012975751491f8f7ae61d7144f7e6 2024-11-11T12:44:01,539 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/2aa58c31c8d344ffac7048ee4a3d9102 2024-11-11T12:44:01,541 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/3327f06c29634decb58f27682f41ec12 2024-11-11T12:44:01,542 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/ed09f19183de4e20b73f08ae0bb12404 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/ed09f19183de4e20b73f08ae0bb12404 2024-11-11T12:44:01,543 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/5417605e6f16439ca1ba8567ec7551e2 2024-11-11T12:44:01,544 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d0c5f90fd3c7454dad60e1a38ee1ef66 2024-11-11T12:44:01,546 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74186ff35993438fbfc50bd7faecd8b2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/74186ff35993438fbfc50bd7faecd8b2 2024-11-11T12:44:01,547 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/674b63a88c1249eb8c82473ebe565ecf 2024-11-11T12:44:01,548 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/6f2bc9cc46f24853bf6368fe39baa0b5 2024-11-11T12:44:01,549 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9152c670f593481485fd83a14cb993d2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9244b3e521d2498d842b74775e7dec1d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/24fb282847af49ed95e58b919e5966b1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b0679d4522994906a92827086ef8ba04, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/befe2f39033c4203a9ae1142258cde78, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8d32d679d71a47e4a92f3d8d5a238c8b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/0b042e16391f4254919f896febe0fd10, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d72e1533d8524e8789764af01debd3a6, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba] to archive 2024-11-11T12:44:01,550 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T12:44:01,552 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a872c7cfb1e64f89b29795744e1abece 2024-11-11T12:44:01,553 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9e7e70e3a49447b28389a836567ebaec 2024-11-11T12:44:01,554 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9152c670f593481485fd83a14cb993d2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9152c670f593481485fd83a14cb993d2 2024-11-11T12:44:01,555 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/1e9179df896a4d4a8e258adfa540a56e 2024-11-11T12:44:01,556 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a786f38b60ad4d74995d5e6824939bb0 2024-11-11T12:44:01,557 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-11T12:44:01,557 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9244b3e521d2498d842b74775e7dec1d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/9244b3e521d2498d842b74775e7dec1d 2024-11-11T12:44:01,559 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/c3509f04ef3a4dd0ae92004d913cf311 2024-11-11T12:44:01,560 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/80b06921640b40239a6dd4bb30fe1c5c 2024-11-11T12:44:01,561 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/24fb282847af49ed95e58b919e5966b1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/24fb282847af49ed95e58b919e5966b1 2024-11-11T12:44:01,562 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/e7aacf346f15438dbe7b2c4892f84f07 2024-11-11T12:44:01,564 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/461a5146e71c49d08f47fd7aa348a852 2024-11-11T12:44:01,565 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/aab5e0ae3b904da4831b89e17e24156a 2024-11-11T12:44:01,566 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b0679d4522994906a92827086ef8ba04 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b0679d4522994906a92827086ef8ba04 2024-11-11T12:44:01,567 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b1f49007e7f346de8ab9a47370fe766a 2024-11-11T12:44:01,568 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/f79b13db358d43fc9308317c66bc64b9 2024-11-11T12:44:01,569 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/befe2f39033c4203a9ae1142258cde78 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/befe2f39033c4203a9ae1142258cde78 2024-11-11T12:44:01,571 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/6944fa711a724984a6858de46b5f96b9 2024-11-11T12:44:01,572 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/a7a049f896bd44faa68c8f6bf0068e46 2024-11-11T12:44:01,573 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/629c00a3acc44196bc340eb515b0ced9 2024-11-11T12:44:01,574 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8d32d679d71a47e4a92f3d8d5a238c8b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8d32d679d71a47e4a92f3d8d5a238c8b 2024-11-11T12:44:01,575 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/8833bb137bdf4b699bd60fe780f583bc 2024-11-11T12:44:01,576 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/bee2e074daa94086932d4845f13a441e 2024-11-11T12:44:01,578 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/0b042e16391f4254919f896febe0fd10 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/0b042e16391f4254919f896febe0fd10 2024-11-11T12:44:01,579 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d65d46c378934e008d5e8c13b676c234 2024-11-11T12:44:01,580 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3061b99555da4520aea13b499662991e 2024-11-11T12:44:01,581 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d72e1533d8524e8789764af01debd3a6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d72e1533d8524e8789764af01debd3a6 2024-11-11T12:44:01,582 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/3ce471691d294ff182d436b234334446 2024-11-11T12:44:01,584 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/b5ddd68b66ca4f54989b9146906ee703 2024-11-11T12:44:01,585 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/99b8d76d001347f294174d42936d65ba 2024-11-11T12:44:01,586 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7e4e1b4d6a514b5f996a9522d60322dc, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/da95abf61bd44245999c6de3884a1ec7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/f3a9a09b86a142f295d3780bde9923ea, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/062971a4d76d481199bc00b04ffcf5b2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7aacbea74a74432491958c71cdabf3f6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e22284d06fe4b9e9e4d3a615c94a31b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ca332845dac44b0dbb4afae5fd8f4db5, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7dca382c5b8746e180dd87d2b8170e59, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b] to archive 2024-11-11T12:44:01,587 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-11T12:44:01,588 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ba67adc9fc4c4485a3f931d41cdb1ee0 2024-11-11T12:44:01,589 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/84c574406a8943e6bfdaebac5112da20 2024-11-11T12:44:01,590 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7e4e1b4d6a514b5f996a9522d60322dc to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7e4e1b4d6a514b5f996a9522d60322dc 2024-11-11T12:44:01,591 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e90c5b81e644ff085e35c13b62ad16e 2024-11-11T12:44:01,592 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/4dbdd1647f614ad38e793ee9cb2a5c81 2024-11-11T12:44:01,593 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/da95abf61bd44245999c6de3884a1ec7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/da95abf61bd44245999c6de3884a1ec7 2024-11-11T12:44:01,594 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eee4effec147420cb2d6e18a7153b3eb 2024-11-11T12:44:01,595 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7bfacc10f4674aca93b6ca9b8308dffa 2024-11-11T12:44:01,596 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/f3a9a09b86a142f295d3780bde9923ea to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/f3a9a09b86a142f295d3780bde9923ea 2024-11-11T12:44:01,598 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b22b74314a114643b4c6174c07170146 2024-11-11T12:44:01,598 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c65513df1f404fbc9022112ff6737eef 2024-11-11T12:44:01,599 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/cf3b63739620498b82254002ab926fd9 2024-11-11T12:44:01,601 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/062971a4d76d481199bc00b04ffcf5b2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/062971a4d76d481199bc00b04ffcf5b2 2024-11-11T12:44:01,602 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/8d424cba938c4b3f9ac46999c74e8b44 2024-11-11T12:44:01,603 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/c622380714a44950bf9f4ddc78d11127 2024-11-11T12:44:01,608 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7aacbea74a74432491958c71cdabf3f6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7aacbea74a74432491958c71cdabf3f6 2024-11-11T12:44:01,609 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b46b7194b3784a6c8f95895287703cbd 2024-11-11T12:44:01,611 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/32e973207ce24050a8e0e8c81ef447ed 2024-11-11T12:44:01,612 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d16ccf50c3794f09a9d939fdb43841f1 2024-11-11T12:44:01,613 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e22284d06fe4b9e9e4d3a615c94a31b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/1e22284d06fe4b9e9e4d3a615c94a31b 2024-11-11T12:44:01,614 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/318a0df74846429fadae6f2e2fb9a5a2 2024-11-11T12:44:01,616 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/3c5f27eac1364c15b65f5b4032834e5f 2024-11-11T12:44:01,617 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ca332845dac44b0dbb4afae5fd8f4db5 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/ca332845dac44b0dbb4afae5fd8f4db5 2024-11-11T12:44:01,618 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/80b93f085a1e4275a7bccfdcba4b148c 2024-11-11T12:44:01,619 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b478f89a90114ee085abc8941258bb0e 2024-11-11T12:44:01,621 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7dca382c5b8746e180dd87d2b8170e59 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/7dca382c5b8746e180dd87d2b8170e59 2024-11-11T12:44:01,622 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/d56d7848e925424bba0c6d2f1ba56104 2024-11-11T12:44:01,623 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/53ad33c24487407199a3ccb4d574b258 2024-11-11T12:44:01,624 DEBUG [StoreCloser-TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/adc4e7d7b3bd4dd4aae1d1d475eac98b 2024-11-11T12:44:01,629 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/recovered.edits/458.seqid, newMaxSeqId=458, maxSeqId=1 2024-11-11T12:44:01,630 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7. 2024-11-11T12:44:01,632 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] regionserver.HRegion(1635): Region close journal for bb21a7c6e49c779e06f46670f1405ab7: 2024-11-11T12:44:01,636 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=149}] handler.UnassignRegionHandler(170): Closed bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:44:01,637 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=bb21a7c6e49c779e06f46670f1405ab7, regionState=CLOSED 2024-11-11T12:44:01,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=149, resume processing ppid=148 2024-11-11T12:44:01,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=148, state=SUCCESS; CloseRegionProcedure bb21a7c6e49c779e06f46670f1405ab7, server=32e78532c8b1,44673,1731328897232 in 1.1790 sec 2024-11-11T12:44:01,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-11-11T12:44:01,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=bb21a7c6e49c779e06f46670f1405ab7, UNASSIGN in 1.1820 sec 2024-11-11T12:44:01,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=147, resume processing ppid=146 2024-11-11T12:44:01,643 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, ppid=146, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.1860 sec 2024-11-11T12:44:01,644 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329041644"}]},"ts":"1731329041644"} 2024-11-11T12:44:01,645 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:44:01,649 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:44:01,650 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.2040 sec 2024-11-11T12:44:02,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=146 2024-11-11T12:44:02,558 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:TestAcidGuarantees, procId: 146 completed 2024-11-11T12:44:02,559 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): 
Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:44:02,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,561 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=150, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,561 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=150, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-11T12:44:02,564 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:44:02,566 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/recovered.edits] 2024-11-11T12:44:02,568 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/055e34bfc0f34c5c8f898dfe734e33eb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/055e34bfc0f34c5c8f898dfe734e33eb 2024-11-11T12:44:02,569 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/1dff8a4f402c4b588479317d9d803d5f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/1dff8a4f402c4b588479317d9d803d5f 2024-11-11T12:44:02,570 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d315f6b45e1b4e609358c4913097a4da to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/d315f6b45e1b4e609358c4913097a4da 2024-11-11T12:44:02,571 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/de00b4ab247a4961b0ffff3df4b64c70 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/A/de00b4ab247a4961b0ffff3df4b64c70 2024-11-11T12:44:02,573 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/5e551b513faa44b7aea361d53f8f1a6f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/5e551b513faa44b7aea361d53f8f1a6f 2024-11-11T12:44:02,574 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/86de876264324b01becd4519d4765add to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/86de876264324b01becd4519d4765add 2024-11-11T12:44:02,577 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/ac55bc4163594ef5b3a539a15a3a3ea7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/ac55bc4163594ef5b3a539a15a3a3ea7 2024-11-11T12:44:02,578 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d16e3ba67aa8412e9a0193d756d69706 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/B/d16e3ba67aa8412e9a0193d756d69706 2024-11-11T12:44:02,581 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/811cef7e7930471abaae34e5be3acdeb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/811cef7e7930471abaae34e5be3acdeb 2024-11-11T12:44:02,582 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b7ee0fd5e92446f08b4f466d80c7f07c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/b7ee0fd5e92446f08b4f466d80c7f07c 2024-11-11T12:44:02,583 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/edf8eb4cd2cc4dcc871b7d4cae387700 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/edf8eb4cd2cc4dcc871b7d4cae387700 2024-11-11T12:44:02,585 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eff062647d6e47e3ac4d3e4866b5fb8f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/C/eff062647d6e47e3ac4d3e4866b5fb8f 2024-11-11T12:44:02,587 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/recovered.edits/458.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7/recovered.edits/458.seqid 2024-11-11T12:44:02,588 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/bb21a7c6e49c779e06f46670f1405ab7 2024-11-11T12:44:02,588 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:44:02,593 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=150, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,599 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:44:02,601 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 2024-11-11T12:44:02,602 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=150, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,602 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-11T12:44:02,602 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731329042602"}]},"ts":"9223372036854775807"} 2024-11-11T12:44:02,609 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:44:02,609 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => bb21a7c6e49c779e06f46670f1405ab7, NAME => 'TestAcidGuarantees,,1731329015170.bb21a7c6e49c779e06f46670f1405ab7.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:44:02,609 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 
2024-11-11T12:44:02,610 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731329042609"}]},"ts":"9223372036854775807"} 2024-11-11T12:44:02,611 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:44:02,614 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=150, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,615 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 55 msec 2024-11-11T12:44:02,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=150 2024-11-11T12:44:02,663 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 150 completed 2024-11-11T12:44:02,674 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testScanAtomicity Thread=238 (was 234) - Thread LEAK? -, OpenFileDescriptor=453 (was 455), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=848 (was 871), ProcessCount=9 (was 9), AvailableMemoryMB=2431 (was 2362) - AvailableMemoryMB LEAK? - 2024-11-11T12:44:02,686 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=238, OpenFileDescriptor=453, MaxFileDescriptor=1048576, SystemLoadAverage=848, ProcessCount=9, AvailableMemoryMB=2430 2024-11-11T12:44:02,687 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 
2024-11-11T12:44:02,688 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.3 create 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:44:02,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:02,689 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_PRE_OPERATION 2024-11-11T12:44:02,690 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:02,690 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.3 procedure request for creating table: namespace: "default" qualifier: "TestAcidGuarantees" procId is: 151 2024-11-11T12:44:02,690 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-11T12:44:02,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-11T12:44:02,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742398_1574 (size=960) 2024-11-11T12:44:02,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-11T12:44:02,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-11T12:44:03,099 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18 2024-11-11T12:44:03,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742399_1575 (size=53) 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1681): Closing cc6fe7bde4d6aa548700eb200610e776, disabling compactions & flushes 2024-11-11T12:44:03,121 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. after waiting 0 ms 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,121 INFO [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,121 DEBUG [RegionOpenAndInit-TestAcidGuarantees-pool-0 {}] regionserver.HRegion(1635): Region close journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:03,122 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ADD_TO_META 2024-11-11T12:44:03,123 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.","families":{"info":[{"qualifier":"regioninfo","vlen":52,"tag":[],"timestamp":"1731329043122"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731329043122"}]},"ts":"1731329043122"} 2024-11-11T12:44:03,124 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-11-11T12:44:03,124 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-11T12:44:03,125 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329043124"}]},"ts":"1731329043124"} 2024-11-11T12:44:03,125 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLING in hbase:meta 2024-11-11T12:44:03,130 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, ASSIGN}] 2024-11-11T12:44:03,131 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, ASSIGN 2024-11-11T12:44:03,132 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=152, ppid=151, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, ASSIGN; state=OFFLINE, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=false 2024-11-11T12:44:03,282 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:03,284 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; OpenRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:44:03,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-11T12:44:03,436 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:03,439 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:03,439 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7285): Opening region: {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:44:03,439 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,439 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:44:03,440 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7327): checking encryption for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,440 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(7330): checking classloading for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,441 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,442 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:03,443 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName A 2024-11-11T12:44:03,443 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:03,443 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:03,444 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,445 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:03,445 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName B 2024-11-11T12:44:03,445 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:03,445 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:03,446 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,447 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:03,447 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName C 2024-11-11T12:44:03,447 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:03,447 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:03,448 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,448 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,449 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,450 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:44:03,452 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1085): writing seq id for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:03,454 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-11T12:44:03,454 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1102): Opened cc6fe7bde4d6aa548700eb200610e776; next sequenceid=2; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63353315, jitterRate=-0.05596204102039337}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:44:03,455 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegion(1001): Region open journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:03,456 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., pid=153, masterSystemTime=1731329043435 2024-11-11T12:44:03,458 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:03,458 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=153}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
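The StoreOpener entries show each family being backed by a CompactingMemStore with the BASIC compactor and a 2.00 MB in-memory flush threshold, which follows from the table attribute set at creation. If only some families should use in-memory compaction, the policy can also be requested per column family; a small sketch under that assumption (the helper class and method names are hypothetical):

    import org.apache.hadoop.hbase.MemoryCompactionPolicy;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class BasicCompactingFamily {
      // Per-family alternative to the table-level attribute: request a
      // CompactingMemStore with the BASIC in-memory compaction policy for one
      // column family only (VERSIONS => '1' kept to match the descriptors above).
      static ColumnFamilyDescriptor basicFamily(String name) {
        return ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes(name))
            .setInMemoryCompaction(MemoryCompactionPolicy.BASIC)
            .setMaxVersions(1)
            .build();
      }
    }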
2024-11-11T12:44:03,458 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=152 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=OPEN, openSeqNum=2, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:03,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-11-11T12:44:03,461 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; OpenRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 in 175 msec 2024-11-11T12:44:03,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=152, resume processing ppid=151 2024-11-11T12:44:03,462 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, ppid=151, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, ASSIGN in 331 msec 2024-11-11T12:44:03,463 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-11T12:44:03,463 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329043463"}]},"ts":"1731329043463"} 2024-11-11T12:44:03,464 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=ENABLED in hbase:meta 2024-11-11T12:44:03,467 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=151, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=TestAcidGuarantees execute state=CREATE_TABLE_POST_OPERATION 2024-11-11T12:44:03,469 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, state=SUCCESS; CreateTableProcedure table=TestAcidGuarantees in 779 msec 2024-11-11T12:44:03,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=151 2024-11-11T12:44:03,794 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:TestAcidGuarantees, procId: 151 completed 2024-11-11T12:44:03,795 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3fb684eb to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@537a66f8 2024-11-11T12:44:03,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ac53e79, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:03,805 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:03,807 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49430, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:03,813 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-11T12:44:03,814 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:42858, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-11T12:44:03,815 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (131072) is too small, which might cause very frequent flushing. 2024-11-11T12:44:03,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$14(2798): Client=jenkins//172.17.0.3 modify table TestAcidGuarantees from 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} to 'TestAcidGuarantees', {TABLE_ATTRIBUTES => {METADATA => {'hbase.hregion.compacting.memstore.type' => 'BASIC', 'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'A', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '4', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'B', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'C', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-11T12:44:03,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=154, state=RUNNABLE:MODIFY_TABLE_PREPARE; ModifyTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:03,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742400_1576 (size=996) 2024-11-11T12:44:04,237 DEBUG [PEWorker-2 {}] util.FSTableDescriptors(519): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000001.960 2024-11-11T12:44:04,237 INFO [PEWorker-2 {}] util.FSTableDescriptors(297): Updated 
tableinfo=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/.tabledesc/.tableinfo.0000000002.996 2024-11-11T12:44:04,239 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=155, ppid=154, state=RUNNABLE:REOPEN_TABLE_REGIONS_GET_REGIONS; ReopenTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:44:04,241 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, REOPEN/MOVE}] 2024-11-11T12:44:04,242 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, REOPEN/MOVE 2024-11-11T12:44:04,243 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,244 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:44:04,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE; CloseRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:44:04,395 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,396 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(124): Close cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,396 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:44:04,396 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1681): Closing cc6fe7bde4d6aa548700eb200610e776, disabling compactions & flushes 2024-11-11T12:44:04,396 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:04,396 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:04,396 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. after waiting 0 ms 2024-11-11T12:44:04,396 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:04,400 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits/4.seqid, newMaxSeqId=4, maxSeqId=1 2024-11-11T12:44:04,401 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:04,401 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegion(1635): Region close journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:04,401 WARN [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] regionserver.HRegionServer(3786): Not adding moved region record: cc6fe7bde4d6aa548700eb200610e776 to self. 2024-11-11T12:44:04,403 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=157}] handler.UnassignRegionHandler(170): Closed cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,403 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=CLOSED 2024-11-11T12:44:04,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-11-11T12:44:04,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; CloseRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 in 160 msec 2024-11-11T12:44:04,406 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=156, ppid=155, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, REOPEN/MOVE; state=CLOSED, location=32e78532c8b1,44673,1731328897232; forceNewPlan=false, retain=true 2024-11-11T12:44:04,557 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=OPENING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=158, ppid=156, state=RUNNABLE; OpenRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:44:04,710 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,713 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(135): Open TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:04,713 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7285): Opening region: {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} 2024-11-11T12:44:04,713 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestAcidGuarantees cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,713 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(894): Instantiated TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-11T12:44:04,714 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7327): checking encryption for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,714 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(7330): checking classloading for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,715 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family A of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,716 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=A, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:04,716 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName A 2024-11-11T12:44:04,717 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:04,717 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/A, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:04,718 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, 
cacheDataCompressed=false, prefetchOnOpen=false, for column family B of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,718 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=B, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:04,718 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName B 2024-11-11T12:44:04,718 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:04,719 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/B, memstore type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:04,719 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family C of region cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,719 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.CompactingMemStore(122): Store=C, in-memory flush size threshold=2.00 MB, immutable segments index type=CHUNK_MAP, compactor=BASIC, pipelineThreshold=2, compactionCellMax=10 2024-11-11T12:44:04,719 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cc6fe7bde4d6aa548700eb200610e776 columnFamilyName C 2024-11-11T12:44:04,719 DEBUG [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:04,720 INFO [StoreOpener-cc6fe7bde4d6aa548700eb200610e776-1 {}] regionserver.HStore(327): Store=cc6fe7bde4d6aa548700eb200610e776/C, memstore 
type=CompactingMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-11T12:44:04,720 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1178): Setting FlushNonSloppyStoresFirstPolicy for the region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:04,720 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,721 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,722 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table TestAcidGuarantees descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-11T12:44:04,724 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1085): writing seq id for cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,724 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1102): Opened cc6fe7bde4d6aa548700eb200610e776; next sequenceid=5; ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71587082, jitterRate=0.06673064827919006}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-11T12:44:04,725 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegion(1001): Region open journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:04,726 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2601): Post open deploy tasks for TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., pid=158, masterSystemTime=1731329044710 2024-11-11T12:44:04,727 DEBUG [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] regionserver.HRegionServer(2628): Finished post open deploy task for TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:04,727 INFO [RS_OPEN_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_OPEN_REGION, pid=158}] handler.AssignRegionHandler(164): Opened TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
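With the region reopened here, the ModifyTableProcedure started at 12:44:03,816 has taken effect: family A is now MOB-enabled with MOB_THRESHOLD => '4', so cell values larger than 4 bytes are written to MOB files under the mobdir path that appears in the flush below. A sketch of an equivalent alteration through the 2.x Admin API (connection boilerplate omitted; the class and method names are illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    final class EnableMobOnA {
      // Rebuilds the current descriptor with family 'A' switched to MOB storage,
      // mirroring the IS_MOB => 'true', MOB_THRESHOLD => '4' change in the log.
      static void enableMob(Admin admin) throws IOException {
        TableName name = TableName.valueOf("TestAcidGuarantees");
        TableDescriptor current = admin.getDescriptor(name);
        TableDescriptor modified = TableDescriptorBuilder.newBuilder(current)
            .modifyColumnFamily(
                ColumnFamilyDescriptorBuilder
                    .newBuilder(current.getColumnFamily(Bytes.toBytes("A")))
                    .setMobEnabled(true)
                    .setMobThreshold(4L)
                    .build())
            .build();
        // Drives the ModifyTableProcedure / ReopenTableRegionsProcedure seen above.
        admin.modifyTable(modified);
      }
    }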
2024-11-11T12:44:04,727 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=156 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=OPEN, openSeqNum=5, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=158, resume processing ppid=156 2024-11-11T12:44:04,730 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; OpenRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 in 171 msec 2024-11-11T12:44:04,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-11-11T12:44:04,731 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, REOPEN/MOVE in 489 msec 2024-11-11T12:44:04,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=155, resume processing ppid=154 2024-11-11T12:44:04,733 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, ppid=154, state=SUCCESS; ReopenTableRegionsProcedure table=TestAcidGuarantees in 493 msec 2024-11-11T12:44:04,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, state=SUCCESS; ModifyTableProcedure table=TestAcidGuarantees in 918 msec 2024-11-11T12:44:04,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=154 2024-11-11T12:44:04,737 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0644b7e6 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6094c70 2024-11-11T12:44:04,741 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5bc9c3e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,742 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5c9b5141 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@103dfc6e 2024-11-11T12:44:04,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7181df3b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,753 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11a52cdf to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6e047c09 2024-11-11T12:44:04,761 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11030ef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,762 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2d7fe431 
to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@60d631a3 2024-11-11T12:44:04,774 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69abefea, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,775 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x091d72db to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58971172 2024-11-11T12:44:04,778 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e757135, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,779 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d7fe93b to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7846cb78 2024-11-11T12:44:04,796 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@150e08ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,798 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11c440f7 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5f1754bc 2024-11-11T12:44:04,804 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a3b66d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,805 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x58460ef3 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@3d9113f3 2024-11-11T12:44:04,825 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5cfdf76c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,826 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6e8cd1ae to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5bb75907 2024-11-11T12:44:04,856 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@68c2838a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,857 DEBUG [Time-limited 
test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x4d832d43 to 127.0.0.1:54294 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2c1d3a95 2024-11-11T12:44:04,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50bf224f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-11T12:44:04,888 DEBUG [hconnection-0x76e2646b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,889 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:04,908 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:44:04,908 DEBUG [hconnection-0x3f766969-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:04,908 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:04,909 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49444, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,911 DEBUG [hconnection-0x45a864aa-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,917 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49450, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,920 DEBUG [hconnection-0x267aff80-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,922 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49466, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:04,925 DEBUG 
[hconnection-0x58681e90-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees 2024-11-11T12:44:04,927 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:04,928 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49476, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,929 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=159, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:04,929 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=159, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:04,930 DEBUG [hconnection-0x84e9bc5-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,930 DEBUG [hconnection-0x4cd06f2a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,932 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49484, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-11T12:44:04,937 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49488, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,944 DEBUG [hconnection-0x12dc2f25-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,944 DEBUG [hconnection-0x619d0016-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,945 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49498, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,952 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49510, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,954 DEBUG [hconnection-0x7f457114-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-11T12:44:04,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
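At 12:44:04,924 the client also asks the master to flush the whole table (FlushTableProcedure pid=159) while the region's own MemStoreFlusher is already draining families A, B and C; the RegionTooBusyException warnings that follow are the region rejecting further puts because the 512 K memstore limit reported in the exception has been reached, a condition the HBase client normally retries once the flush frees space. A minimal sketch of issuing the same table-level flush (standard Admin API; connection setup omitted, class name illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;

    final class FlushAcidTable {
      // Asks the master to flush every region of the table, which appears in the
      // log as a FlushTableProcedure with per-region FlushRegionProcedure children.
      static void flushTable(Admin admin) throws IOException {
        admin.flush(TableName.valueOf("TestAcidGuarantees"));
      }
    }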
2024-11-11T12:44:04,959 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.3:49524, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-11T12:44:04,974 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:04,974 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 2 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329104968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,974 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329104969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,978 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:04,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329104973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:04,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329104973, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,978 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:04,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 5 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329104974, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:04,987 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a1743446d6ae46a1abf9e9c5221af80d_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329044898/Put/seqid=0 2024-11-11T12:44:04,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742401_1577 (size=12154) 2024-11-11T12:44:04,997 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,010 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a1743446d6ae46a1abf9e9c5221af80d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a1743446d6ae46a1abf9e9c5221af80d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:05,011 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2afbc23dfdaf45c8a7a1694e33e7a12a, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:05,012 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2afbc23dfdaf45c8a7a1694e33e7a12a is 175, key is test_row_0/A:col10/1731329044898/Put/seqid=0 2024-11-11T12:44:05,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742402_1578 (size=30955) 2024-11-11T12:44:05,040 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=15, memsize=17.9 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2afbc23dfdaf45c8a7a1694e33e7a12a 2024-11-11T12:44:05,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-11T12:44:05,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,080 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329105079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 4 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329105079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,080 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91af8830708f4968bdc1cf582fda38d9 is 50, key is test_row_0/B:col10/1731329044898/Put/seqid=0 2024-11-11T12:44:05,080 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329105079, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,081 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,082 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-11T12:44:05,082 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:05,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:05,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329105080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,083 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] handler.RSProcedureHandler(58): pid=160 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:05,083 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=160 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:05,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329105080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=160 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:05,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742403_1579 (size=12001) 2024-11-11T12:44:05,110 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91af8830708f4968bdc1cf582fda38d9 2024-11-11T12:44:05,140 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/d78fb23f0472454aa8fbe543cacf84b7 is 50, key is test_row_0/C:col10/1731329044898/Put/seqid=0 2024-11-11T12:44:05,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742404_1580 (size=12001) 2024-11-11T12:44:05,171 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=15 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/d78fb23f0472454aa8fbe543cacf84b7 2024-11-11T12:44:05,182 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2afbc23dfdaf45c8a7a1694e33e7a12a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a 2024-11-11T12:44:05,187 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a, entries=150, sequenceid=15, filesize=30.2 K 2024-11-11T12:44:05,189 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91af8830708f4968bdc1cf582fda38d9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9 2024-11-11T12:44:05,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:44:05,200 ERROR [LeaseRenewer:jenkins@localhost:42421 {}] server.NIOServerCnxnFactory(85): Thread Thread[LeaseRenewer:jenkins@localhost:42421,5,PEWorkerGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hadoop.hdfs.DFSOutputStream.getNamespace()" because "outputStream" is null at org.apache.hadoop.hdfs.DFSClient.getNamespaces(DFSClient.java:596) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.renewLease(DFSClient.java:618) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.renew(LeaseRenewer.java:425) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:445) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) ~[hadoop-hdfs-client-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
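
The repeated RegionTooBusyException entries in this stretch of the log show the region server rejecting Mutate calls for region cc6fe7bde4d6aa548700eb200610e776 while its memstore is above the 512.0 K blocking limit and a flush is still catching up. Below is a minimal client-side sketch of backing off on that signal; the table name, column family, row key, and timing values are assumptions for illustration only, and with default client settings (hbase.client.retries.number) the busy signal may instead surface wrapped in a RetriesExhaustedException after the client's own internal retries.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BackoffPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("TestAcidGuarantees"))) {
            Put put = new Put(Bytes.toBytes("test_row_0"));
            put.addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));

            long backoffMs = 100;
            for (int attempt = 1; attempt <= 5; attempt++) {
                try {
                    table.put(put);   // server may reject with RegionTooBusyException while blocked
                    break;            // write accepted, stop retrying
                } catch (RegionTooBusyException e) {
                    // Region is above its memstore blocking limit; wait for the flush to drain it.
                    Thread.sleep(backoffMs);
                    backoffMs *= 2;   // simple exponential backoff
                }
            }
        }
    }
}

In practice the stock HBase client already applies pause-and-retry behaviour internally, so an explicit loop like this is mainly useful when client retries are turned down or off.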
2024-11-11T12:44:05,205 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/d78fb23f0472454aa8fbe543cacf84b7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7 2024-11-11T12:44:05,212 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7, entries=150, sequenceid=15, filesize=11.7 K 2024-11-11T12:44:05,213 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc6fe7bde4d6aa548700eb200610e776 in 304ms, sequenceid=15, compaction requested=false 2024-11-11T12:44:05,213 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:05,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-11T12:44:05,242 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,243 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=160 2024-11-11T12:44:05,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
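
Entries such as "Checking to see if procedure is done pid=159", "Executing remote procedure class ... FlushRegionCallable, pid=160", and "NOT flushing ... as already flushing" correspond to a master-driven flush request racing with the MemStoreFlusher's own flush of the same region. The sketch below, using the default property values (this run evidently configures much smaller ones to reach a 512.0 K limit), shows how the blocking threshold is derived from the flush size and block multiplier, and how a table flush can be requested through the Admin API; the class name is hypothetical.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThresholdExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();

        // The "Over memstore limit" threshold reported above is the per-region flush size
        // multiplied by the blocking multiplier (server-side settings; defaults shown here).
        long flushSize  = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        long multiplier = conf.getLong("hbase.hregion.memstore.block.multiplier", 4L);
        System.out.println("Writes block once a region's memstore exceeds "
            + (flushSize * multiplier) + " bytes");

        // Request a flush of the table, similar to what the FlushRegionCallable procedure
        // (pid=160 above) asks the region server to do; the server skips the request when a
        // flush for that region is already running ("NOT flushing ... as already flushing").
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
            admin.flush(TableName.valueOf("TestAcidGuarantees"));
        }
    }
}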
2024-11-11T12:44:05,243 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:44:05,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:05,243 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:05,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:05,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:05,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:05,244 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:05,258 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ae4056843cc245f2a4243141be9599fd_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329044973/Put/seqid=0 2024-11-11T12:44:05,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742405_1581 (size=12154) 2024-11-11T12:44:05,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,280 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ae4056843cc245f2a4243141be9599fd_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ae4056843cc245f2a4243141be9599fd_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:05,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/116adffcd85740b09de06167e9684c81, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:05,282 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/116adffcd85740b09de06167e9684c81 is 175, key is test_row_0/A:col10/1731329044973/Put/seqid=0 2024-11-11T12:44:05,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:05,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:05,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742406_1582 (size=30955) 2024-11-11T12:44:05,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329105304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,307 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 7 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329105304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329105304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,308 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329105304, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,310 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,310 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329105307, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 9 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329105409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,409 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329105409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 13 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329105409, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,410 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,410 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329105410, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,412 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,412 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329105412, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-11T12:44:05,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 11 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329105611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,611 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:44:05,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329105611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:44:05,613 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:44:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 15 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329105611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:44:05,613 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:44:05,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329105611, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:44:05,615 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit.
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
    at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:44:05,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329105614, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:44:05,712 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=40, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/116adffcd85740b09de06167e9684c81
2024-11-11T12:44:05,764 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/8fb32e1030544d56a2ed0f47a6b53cb3 is 50, key is test_row_0/B:col10/1731329044973/Put/seqid=0
2024-11-11T12:44:05,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742407_1583 (size=12001)
2024-11-11T12:44:05,801 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/8fb32e1030544d56a2ed0f47a6b53cb3
2024-11-11T12:44:05,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0240677b27e7418db96a08d74890680d is 50, key is test_row_0/C:col10/1731329044973/Put/seqid=0
2024-11-11T12:44:05,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742408_1584 (size=12001)
2024-11-11T12:44:05,821 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=40 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0240677b27e7418db96a08d74890680d
2024-11-11T12:44:05,827 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS,
pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/116adffcd85740b09de06167e9684c81 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81 2024-11-11T12:44:05,832 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81, entries=150, sequenceid=40, filesize=30.2 K 2024-11-11T12:44:05,833 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/8fb32e1030544d56a2ed0f47a6b53cb3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3 2024-11-11T12:44:05,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,836 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3, entries=150, sequenceid=40, filesize=11.7 K 2024-11-11T12:44:05,837 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0240677b27e7418db96a08d74890680d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d 2024-11-11T12:44:05,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,845 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d, entries=150, sequenceid=40, filesize=11.7 K 2024-11-11T12:44:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,846 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=53.67 KB/54960 for cc6fe7bde4d6aa548700eb200610e776 in 603ms, sequenceid=40, compaction requested=false 2024-11-11T12:44:05,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:05,846 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
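The RegionTooBusyException entries above are the region server's memstore back-pressure: mutations against region cc6fe7bde4d6aa548700eb200610e776 are rejected while the memstore is over its 512.0 K limit, and the flush recorded just above (dataSize ~147.60 KB at sequenceid=40) is what frees that space again. A minimal client-side sketch of how such a rejected put could be retried with backoff follows; it reuses the table, row, family, and qualifier names visible in this log (TestAcidGuarantees, test_row_0, A, col10), the cell value and retry limits are hypothetical, and it is illustrative only, since the stock HBase client already retries this exception on its own.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Illustrative sketch: bounded retry around a put that the server may reject
// with RegionTooBusyException ("Over memstore limit"), as seen in the log above.
public final class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100L;                // hypothetical starting backoff
      for (int attempt = 1; ; attempt++) {
        try {
          table.put(put);                   // may be rejected while the memstore is over its limit
          break;                            // write accepted
        } catch (RegionTooBusyException e) {
          if (attempt >= 5) {
            throw e;                        // give up after a few attempts
          }
          Thread.sleep(backoffMs);          // wait for a flush to free memstore space
          backoffMs *= 2;                   // exponential backoff
        }
      }
    }
  }
}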
2024-11-11T12:44:05,847 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=160}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=160 2024-11-11T12:44:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=160 2024-11-11T12:44:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=159 2024-11-11T12:44:05,850 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 919 msec 2024-11-11T12:44:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,851 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,851 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=159, table=TestAcidGuarantees in 926 msec 2024-11-11T12:44:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,852 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,855 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,855 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,858 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,863 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,864 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,867 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,871 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,875 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,877 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,881 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,881 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,882 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,885 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,889 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,893 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:05,897 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[... the same storefiletracker.StoreFileTrackerFactory(122) DEBUG entry is repeated by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 on port 44673 between 2024-11-11T12:44:05,897 and 12:44:05,937; the repeats are condensed and only the distinct interleaved entries are kept below ...]
2024-11-11T12:44:05,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776
2024-11-11T12:44:05,917 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C
2024-11-11T12:44:05,918 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:05,933 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111272aeb440bc44c6b9c44c512f50f5bcb_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_1/A:col10/1731329045916/Put/seqid=0
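The flush above is racing the writers that keep sending Mutate requests, and the RegionTooBusyException entries that follow are the region blocking updates once its memstore passes the blocking threshold, reported here as 512.0 K. In HBase that threshold is the memstore flush size multiplied by the block multiplier. The actual settings TestAcidGuarantees uses are not visible in this excerpt; the snippet below is only a hedged illustration of how such a small limit could arise, with the class name and the 256 KB / 2x values chosen for the example.

// Illustrative sketch, not taken from the test: a configuration under which a
// region would block updates at 512 KB, matching "Over memstore limit=512.0 K".
// The property names are standard HBase keys; the values are assumptions.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemstoreBlockingLimitSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Flush a region's memstore once it reaches 256 KB (the default is 128 MB).
    conf.setLong("hbase.hregion.memstore.flush.size", 256 * 1024);
    // Block new updates once the memstore reaches flush.size * multiplier,
    // i.e. 256 KB * 2 = 512 KB in this example.
    conf.setInt("hbase.hregion.memstore.block.multiplier", 2);

    long blockingLimit = conf.getLong("hbase.hregion.memstore.flush.size", 0)
        * conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    System.out.println("updates blocked above " + blockingLimit + " bytes");
  }
}

Writes that arrive while the region is over this limit are rejected with RegionTooBusyException until the flush brings the memstore back down, which is exactly the pattern in the entries that follow.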
2024-11-11T12:44:05,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742409_1585 (size=12154)
2024-11-11T12:44:05,982 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT]
2024-11-11T12:44:05,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 17 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329105979, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232
2024-11-11T12:44:05,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329105980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,983 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,983 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329105980, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,985 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329105982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:05,985 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:05,991 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 22 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329105982, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=159 2024-11-11T12:44:06,045 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 159 completed 2024-11-11T12:44:06,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:06,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees 2024-11-11T12:44:06,055 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:06,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:06,055 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=161, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:06,055 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:06,085 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329106084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,086 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 19 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329106084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329106084, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,086 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329106086, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,094 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 24 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329106093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:06,208 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,209 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,210 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,210 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,291 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329106290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,292 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329106290, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 21 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329106294, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329106295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,295 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,295 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 26 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329106295, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,351 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:06,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:06,363 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,364 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,364 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111272aeb440bc44c6b9c44c512f50f5bcb_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111272aeb440bc44c6b9c44c512f50f5bcb_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:06,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:06,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,366 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,366 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,367 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2f0f0a20645242b0bea9f8020b1326c8, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:06,368 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2f0f0a20645242b0bea9f8020b1326c8 is 175, key is test_row_1/A:col10/1731329045916/Put/seqid=0 2024-11-11T12:44:06,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742410_1586 (size=30951) 2024-11-11T12:44:06,519 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,520 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:06,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,523 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,523 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 32 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329106595, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 27 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329106596, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,597 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 23 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329106597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,598 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 28 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329106597, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,600 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:06,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329106599, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:06,675 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,676 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:06,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,676 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,807 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=52, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2f0f0a20645242b0bea9f8020b1326c8 2024-11-11T12:44:06,817 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b97da4fb83f9490aac2e076d955cb1e1 is 50, key is test_row_1/B:col10/1731329045916/Put/seqid=0 2024-11-11T12:44:06,830 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,830 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,830 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:06,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:06,831 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,831 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742411_1587 (size=9657) 2024-11-11T12:44:06,868 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b97da4fb83f9490aac2e076d955cb1e1 2024-11-11T12:44:06,883 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1e1c3f9fe2364ffb85261439fd127e75 is 50, key is test_row_1/C:col10/1731329045916/Put/seqid=0 2024-11-11T12:44:06,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742412_1588 (size=9657) 2024-11-11T12:44:06,986 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:06,987 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:06,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:06,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:06,987 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,987 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:06,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:07,101 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:07,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329107101, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:07,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329107102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,103 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 25 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329107102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,104 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:07,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 30 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329107102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,106 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:07,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329107105, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,141 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,142 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:07,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:07,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,142 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:07,142 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:07,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:07,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:07,173 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-11T12:44:07,288 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=52 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1e1c3f9fe2364ffb85261439fd127e75 2024-11-11T12:44:07,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2f0f0a20645242b0bea9f8020b1326c8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8 2024-11-11T12:44:07,295 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,295 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:07,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:07,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:07,295 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,296 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] handler.RSProcedureHandler(58): pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:07,296 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=162 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:07,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=162 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:07,297 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8, entries=150, sequenceid=52, filesize=30.2 K 2024-11-11T12:44:07,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b97da4fb83f9490aac2e076d955cb1e1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1 2024-11-11T12:44:07,301 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1, entries=100, sequenceid=52, filesize=9.4 K 2024-11-11T12:44:07,302 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1e1c3f9fe2364ffb85261439fd127e75 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75 2024-11-11T12:44:07,306 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75, entries=100, sequenceid=52, filesize=9.4 K 2024-11-11T12:44:07,307 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cc6fe7bde4d6aa548700eb200610e776 in 1390ms, sequenceid=52, compaction requested=true 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:07,307 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, 
priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:07,307 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:07,307 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:07,308 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 92861 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:07,308 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:07,308 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:07,308 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:07,308 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,308 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:07,308 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=90.7 K 2024-11-11T12:44:07,308 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=32.9 K 2024-11-11T12:44:07,309 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 91af8830708f4968bdc1cf582fda38d9, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731329044898 2024-11-11T12:44:07,308 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,309 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8fb32e1030544d56a2ed0f47a6b53cb3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731329044951 2024-11-11T12:44:07,309 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8] 2024-11-11T12:44:07,309 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting b97da4fb83f9490aac2e076d955cb1e1, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329045303 2024-11-11T12:44:07,309 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2afbc23dfdaf45c8a7a1694e33e7a12a, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731329044898 2024-11-11T12:44:07,310 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 116adffcd85740b09de06167e9684c81, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731329044951 2024-11-11T12:44:07,310 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2f0f0a20645242b0bea9f8020b1326c8, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329045293 2024-11-11T12:44:07,326 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#498 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:07,326 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/c48784e35302408a85c56dcd3a04370f is 50, key is test_row_0/B:col10/1731329044973/Put/seqid=0 2024-11-11T12:44:07,328 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:07,333 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111d66c22f1ebf8439aa3e05ecdf766f94c_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:07,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742413_1589 (size=12104) 2024-11-11T12:44:07,335 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111d66c22f1ebf8439aa3e05ecdf766f94c_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:07,335 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d66c22f1ebf8439aa3e05ecdf766f94c_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:07,339 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/c48784e35302408a85c56dcd3a04370f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c48784e35302408a85c56dcd3a04370f 2024-11-11T12:44:07,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742414_1590 (size=4469) 2024-11-11T12:44:07,345 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into c48784e35302408a85c56dcd3a04370f(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:07,345 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:07,345 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=13, startTime=1731329047307; duration=0sec 2024-11-11T12:44:07,345 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:07,345 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:07,345 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:07,346 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 33659 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:07,346 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:07,346 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:07,346 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=32.9 K 2024-11-11T12:44:07,347 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting d78fb23f0472454aa8fbe543cacf84b7, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=15, earliestPutTs=1731329044898 2024-11-11T12:44:07,347 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0240677b27e7418db96a08d74890680d, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=40, earliestPutTs=1731329044951 2024-11-11T12:44:07,347 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1e1c3f9fe2364ffb85261439fd127e75, keycount=100, bloomtype=ROW, size=9.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329045303 2024-11-11T12:44:07,355 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
cc6fe7bde4d6aa548700eb200610e776#C#compaction#500 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:07,356 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/c83508e720cd4ddfaf9b3f9dda8b4862 is 50, key is test_row_0/C:col10/1731329044973/Put/seqid=0 2024-11-11T12:44:07,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742415_1591 (size=12104) 2024-11-11T12:44:07,376 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/c83508e720cd4ddfaf9b3f9dda8b4862 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c83508e720cd4ddfaf9b3f9dda8b4862 2024-11-11T12:44:07,382 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into c83508e720cd4ddfaf9b3f9dda8b4862(size=11.8 K), total size for store is 11.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:07,383 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:07,383 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=13, startTime=1731329047307; duration=0sec 2024-11-11T12:44:07,383 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:07,383 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:07,447 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:07,447 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=162 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:07,448 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:07,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:07,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111dfaa2562f29e46088671b5f9533fd962_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329045981/Put/seqid=0 2024-11-11T12:44:07,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742416_1592 (size=12154) 2024-11-11T12:44:07,744 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#499 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:07,745 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/05015dfa8cc84b17a497700bb5e097c3 is 175, key is test_row_0/A:col10/1731329044973/Put/seqid=0 2024-11-11T12:44:07,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742417_1593 (size=31165) 2024-11-11T12:44:07,759 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/05015dfa8cc84b17a497700bb5e097c3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3 2024-11-11T12:44:07,765 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 05015dfa8cc84b17a497700bb5e097c3(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:07,765 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:07,765 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=13, startTime=1731329047307; duration=0sec 2024-11-11T12:44:07,765 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:07,765 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:07,924 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:07,936 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111dfaa2562f29e46088671b5f9533fd962_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111dfaa2562f29e46088671b5f9533fd962_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:07,937 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/067c2114451943dcb08ba960c6662ac9, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:07,938 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/067c2114451943dcb08ba960c6662ac9 is 175, key is test_row_0/A:col10/1731329045981/Put/seqid=0 2024-11-11T12:44:07,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742418_1594 (size=30955) 2024-11-11T12:44:07,958 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=78, memsize=47.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/067c2114451943dcb08ba960c6662ac9 2024-11-11T12:44:07,965 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/c664c198a5b348a09008b3bf5691be14 is 50, key is test_row_0/B:col10/1731329045981/Put/seqid=0 2024-11-11T12:44:07,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742419_1595 (size=12001) 2024-11-11T12:44:07,971 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/c664c198a5b348a09008b3bf5691be14 2024-11-11T12:44:07,978 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/f7b94925f9bf4b2ca909cd673c1c42da is 50, key is test_row_0/C:col10/1731329045981/Put/seqid=0 2024-11-11T12:44:07,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742420_1596 (size=12001) 2024-11-11T12:44:08,114 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,129 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 33 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329108122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 38 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329108122, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 47 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329108129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,132 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 34 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329108129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,132 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 29 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329108129, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-11-11T12:44:08,230 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329108230, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,232 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 40 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329108231, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,234 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 36 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329108233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 31 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329108233, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,235 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 49 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329108234, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,401 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=46.96 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/f7b94925f9bf4b2ca909cd673c1c42da 2024-11-11T12:44:08,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/067c2114451943dcb08ba960c6662ac9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9 2024-11-11T12:44:08,409 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9, entries=150, sequenceid=78, filesize=30.2 K 2024-11-11T12:44:08,409 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/c664c198a5b348a09008b3bf5691be14 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14 2024-11-11T12:44:08,412 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14, entries=150, sequenceid=78, filesize=11.7 K 2024-11-11T12:44:08,413 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/f7b94925f9bf4b2ca909cd673c1c42da as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da 2024-11-11T12:44:08,417 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da, entries=150, sequenceid=78, filesize=11.7 K 2024-11-11T12:44:08,418 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(3040): Finished flush of dataSize ~140.89 KB/144270, heapSize ~369.84 KB/378720, currentSize=73.80 KB/75570 for cc6fe7bde4d6aa548700eb200610e776 in 970ms, sequenceid=78, compaction requested=false 2024-11-11T12:44:08,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:08,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:08,418 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=162}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=162 2024-11-11T12:44:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=162 2024-11-11T12:44:08,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=162, resume processing ppid=161 2024-11-11T12:44:08,420 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, ppid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.3640 sec 2024-11-11T12:44:08,422 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=161, table=TestAcidGuarantees in 2.3680 sec 2024-11-11T12:44:08,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:08,434 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:08,434 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:08,443 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111498c12bc982148ce8a5175a0b20ec530_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:08,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742421_1597 (size=12154) 2024-11-11T12:44:08,457 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329108454, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329108455, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,459 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329108456, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,460 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 42 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329108457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,460 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 37 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329108457, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,560 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329108559, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329108560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,561 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329108560, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,564 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 39 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329108561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,564 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 44 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329108561, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,763 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329108762, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,764 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329108763, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,765 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329108764, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 41 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329108766, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,768 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:08,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329108767, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:08,854 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:08,859 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111498c12bc982148ce8a5175a0b20ec530_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111498c12bc982148ce8a5175a0b20ec530_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:08,860 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1f309171af27496bb2f2c70fa54c5ed1, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:08,860 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1f309171af27496bb2f2c70fa54c5ed1 is 175, key is test_row_0/A:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:08,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742422_1598 (size=30955) 2024-11-11T12:44:09,066 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329109066, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,067 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329109067, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,069 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329109068, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 43 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329109069, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,071 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329109070, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,268 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=94, memsize=26.8 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1f309171af27496bb2f2c70fa54c5ed1 2024-11-11T12:44:09,294 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/cc48fa6a15be42b7bfc375807eb01ea3 is 50, key is test_row_0/B:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:09,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742423_1599 (size=12001) 2024-11-11T12:44:09,349 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/cc48fa6a15be42b7bfc375807eb01ea3 2024-11-11T12:44:09,358 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ea929f0468544078bf24b3b83005501f is 50, key is test_row_0/C:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:09,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742424_1600 (size=12001) 2024-11-11T12:44:09,364 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=26.84 KB at sequenceid=94 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ea929f0468544078bf24b3b83005501f 2024-11-11T12:44:09,370 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1f309171af27496bb2f2c70fa54c5ed1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1 2024-11-11T12:44:09,374 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1, entries=150, sequenceid=94, filesize=30.2 K 2024-11-11T12:44:09,375 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/cc48fa6a15be42b7bfc375807eb01ea3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3 2024-11-11T12:44:09,379 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3, entries=150, sequenceid=94, filesize=11.7 K 2024-11-11T12:44:09,380 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ea929f0468544078bf24b3b83005501f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f 2024-11-11T12:44:09,383 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f, entries=150, sequenceid=94, filesize=11.7 K 2024-11-11T12:44:09,384 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~80.51 KB/82440, heapSize ~211.64 KB/216720, currentSize=127.47 KB/130530 for cc6fe7bde4d6aa548700eb200610e776 in 950ms, sequenceid=94, compaction requested=true 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 
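
Note on the repeated RegionTooBusyException entries above: HRegion.checkResources rejects writes once a region's in-memory data exceeds its blocking memstore size, which HBase derives from hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier; the "Over memstore limit=512.0 K" figure here reflects the deliberately small memstore used by this test, and the rejections stop once the flush recorded in the surrounding entries frees memstore space. The sketch below only illustrates how a standalone client could surface and retry these rejections against the TestAcidGuarantees table; it is not part of the test, the stock HBase client already retries such failures internally, and the value written, retry cap, and backoff are arbitrary placeholders. Row key, families, and qualifier are taken from the log entries above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.RegionTooBusyException;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class BusyRegionPutSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
          // Row/family/qualifier as seen in the log ("test_row_0/A:col10/..."); the value is a placeholder.
          Put put = new Put(Bytes.toBytes("test_row_0"))
              .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
          // Illustrative manual retry loop; in practice the client-side retry policy handles this,
          // and the exception may arrive wrapped rather than directly as RegionTooBusyException.
          for (int attempt = 1; ; attempt++) {
            try {
              table.put(put);
              break;
            } catch (RegionTooBusyException e) {
              if (attempt >= 5) {            // arbitrary cap for the sketch
                throw e;
              }
              Thread.sleep(100L * attempt);  // arbitrary linear backoff while the region flushes
            }
          }
        }
      }
    }
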
2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:09,384 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:09,384 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:09,384 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:09,385 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93075 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:09,385 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:09,385 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:09,385 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=90.9 K 2024-11-11T12:44:09,385 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:09,385 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1] 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05015dfa8cc84b17a497700bb5e097c3, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329044973 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:09,386 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:09,386 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c48784e35302408a85c56dcd3a04370f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=35.3 K 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 067c2114451943dcb08ba960c6662ac9, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731329045978 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c48784e35302408a85c56dcd3a04370f, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329044973 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 1f309171af27496bb2f2c70fa54c5ed1, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:09,386 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c664c198a5b348a09008b3bf5691be14, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731329045978 2024-11-11T12:44:09,387 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 
cc48fa6a15be42b7bfc375807eb01ea3, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:09,393 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:09,395 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#508 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:09,395 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/bbcdac28ab014e41aa95516f035e22cb is 50, key is test_row_0/B:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:09,401 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111ef1a059bdd9c464090f8971433b5183c_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:09,402 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111ef1a059bdd9c464090f8971433b5183c_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:09,403 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ef1a059bdd9c464090f8971433b5183c_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:09,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742426_1602 (size=4469) 2024-11-11T12:44:09,431 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#507 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:09,431 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/05f7ca2c9119448986cb23fda57e632a is 175, key is test_row_0/A:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:09,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742425_1601 (size=12207) 2024-11-11T12:44:09,440 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/bbcdac28ab014e41aa95516f035e22cb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbcdac28ab014e41aa95516f035e22cb 2024-11-11T12:44:09,445 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into bbcdac28ab014e41aa95516f035e22cb(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:09,445 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:09,445 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=13, startTime=1731329049384; duration=0sec 2024-11-11T12:44:09,446 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:09,446 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:09,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742427_1603 (size=31161) 2024-11-11T12:44:09,446 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:09,447 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36106 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:09,447 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:09,447 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:09,447 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c83508e720cd4ddfaf9b3f9dda8b4862, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=35.3 K 2024-11-11T12:44:09,447 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c83508e720cd4ddfaf9b3f9dda8b4862, keycount=150, bloomtype=ROW, size=11.8 K, encoding=NONE, compression=NONE, seqNum=52, earliestPutTs=1731329044973 2024-11-11T12:44:09,448 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f7b94925f9bf4b2ca909cd673c1c42da, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731329045978 2024-11-11T12:44:09,448 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ea929f0468544078bf24b3b83005501f, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:09,456 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/05f7ca2c9119448986cb23fda57e632a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a 2024-11-11T12:44:09,462 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 05f7ca2c9119448986cb23fda57e632a(size=30.4 K), total size for store is 30.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:09,462 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:09,462 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=13, startTime=1731329049384; duration=0sec 2024-11-11T12:44:09,462 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:09,462 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:09,466 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#509 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:09,466 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1d805ec9d46a441089222539ed0e3493 is 50, key is test_row_0/C:col10/1731329048124/Put/seqid=0 2024-11-11T12:44:09,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742428_1604 (size=12207) 2024-11-11T12:44:09,480 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1d805ec9d46a441089222539ed0e3493 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d805ec9d46a441089222539ed0e3493 2024-11-11T12:44:09,484 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into 1d805ec9d46a441089222539ed0e3493(size=11.9 K), total size for store is 11.9 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:09,484 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:09,484 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=13, startTime=1731329049384; duration=0sec 2024-11-11T12:44:09,484 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:09,484 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:09,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:09,569 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:09,569 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:09,576 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b1502f82b3f4412f9e7d427c53e65636_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329048449/Put/seqid=0 2024-11-11T12:44:09,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742429_1605 (size=12154) 2024-11-11T12:44:09,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 46 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329109577, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329109578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329109578, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,581 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329109579, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,582 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 51 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329109580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,683 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 48 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329109682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 56 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329109682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329109682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,684 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329109682, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,684 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 53 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329109683, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,885 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 50 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329109885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329109885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329109885, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,886 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 58 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329109886, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,888 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:09,888 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 55 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329109887, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:09,980 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:09,984 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111b1502f82b3f4412f9e7d427c53e65636_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b1502f82b3f4412f9e7d427c53e65636_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:09,985 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/7f06247121464cd0b071fd8ab6817e21, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:09,985 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/7f06247121464cd0b071fd8ab6817e21 is 175, key is test_row_0/A:col10/1731329048449/Put/seqid=0 2024-11-11T12:44:09,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742430_1606 (size=30955) 2024-11-11T12:44:10,162 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking 
to see if procedure is done pid=161 2024-11-11T12:44:10,162 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 161 completed 2024-11-11T12:44:10,163 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:10,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees 2024-11-11T12:44:10,165 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:10,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-11T12:44:10,165 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=163, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:10,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=164, ppid=163, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:10,188 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329110187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,189 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 60 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329110187, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,189 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329110188, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,190 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 52 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329110189, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,193 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 57 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329110191, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-11T12:44:10,317 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,317 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-11T12:44:10,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,317 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:10,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,318 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,318 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,318 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,390 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=120, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/7f06247121464cd0b071fd8ab6817e21 2024-11-11T12:44:10,397 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/ae74c2c4d87444e9bce4955f2385667a is 50, key is test_row_0/B:col10/1731329048449/Put/seqid=0 2024-11-11T12:44:10,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742431_1607 (size=12001) 2024-11-11T12:44:10,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/ae74c2c4d87444e9bce4955f2385667a 2024-11-11T12:44:10,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/a1e47bcdb0d94c00917bdba4ac0ec280 is 50, key is test_row_0/C:col10/1731329048449/Put/seqid=0 2024-11-11T12:44:10,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742432_1608 (size=12001) 2024-11-11T12:44:10,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=163 2024-11-11T12:44:10,469 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,470 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-11T12:44:10,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:10,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,470 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,470 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:10,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,622 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,623 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-11T12:44:10,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:10,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,623 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,623 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,624 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,691 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329110690, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,693 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329110691, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 64 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329110692, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,694 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 54 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329110693, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,698 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:10,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 59 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329110697, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,767 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-11T12:44:10,775 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,775 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-11T12:44:10,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,775 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:10,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:10,776 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] handler.RSProcedureHandler(58): pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,776 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=164 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=164 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:10,818 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=120 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/a1e47bcdb0d94c00917bdba4ac0ec280 2024-11-11T12:44:10,822 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/7f06247121464cd0b071fd8ab6817e21 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21 2024-11-11T12:44:10,825 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21, entries=150, sequenceid=120, filesize=30.2 K 2024-11-11T12:44:10,826 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/ae74c2c4d87444e9bce4955f2385667a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a 2024-11-11T12:44:10,830 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a, entries=150, 
sequenceid=120, filesize=11.7 K 2024-11-11T12:44:10,830 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/a1e47bcdb0d94c00917bdba4ac0ec280 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280 2024-11-11T12:44:10,834 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280, entries=150, sequenceid=120, filesize=11.7 K 2024-11-11T12:44:10,834 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for cc6fe7bde4d6aa548700eb200610e776 in 1265ms, sequenceid=120, compaction requested=false 2024-11-11T12:44:10,835 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:10,928 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:10,929 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=164 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:10,929 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:10,929 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:10,936 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111194fec8055e824b729935dfdfd9b443f6_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329049579/Put/seqid=0 2024-11-11T12:44:10,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742433_1609 (size=12304) 2024-11-11T12:44:10,943 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:10,947 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111194fec8055e824b729935dfdfd9b443f6_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111194fec8055e824b729935dfdfd9b443f6_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:10,948 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/239cfe4378d7483fb4b24f785614df7a, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:10,949 
DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/239cfe4378d7483fb4b24f785614df7a is 175, key is test_row_0/A:col10/1731329049579/Put/seqid=0 2024-11-11T12:44:10,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742434_1610 (size=31105) 2024-11-11T12:44:11,268 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-11T12:44:11,353 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=133, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/239cfe4378d7483fb4b24f785614df7a 2024-11-11T12:44:11,360 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/793617b77b8944cd943fe68eefb2ff95 is 50, key is test_row_0/B:col10/1731329049579/Put/seqid=0 2024-11-11T12:44:11,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742435_1611 (size=12151) 2024-11-11T12:44:11,365 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/793617b77b8944cd943fe68eefb2ff95 2024-11-11T12:44:11,372 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0b446eca86564fa492cbbe91b2499025 is 50, key is test_row_0/C:col10/1731329049579/Put/seqid=0 2024-11-11T12:44:11,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742436_1612 (size=12151) 2024-11-11T12:44:11,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:11,696 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 62 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329111712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 70 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329111712, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329111713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 61 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329111713, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,716 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329111714, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,776 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=133 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0b446eca86564fa492cbbe91b2499025 2024-11-11T12:44:11,780 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/239cfe4378d7483fb4b24f785614df7a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a 2024-11-11T12:44:11,784 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a, entries=150, sequenceid=133, filesize=30.4 K 2024-11-11T12:44:11,788 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/793617b77b8944cd943fe68eefb2ff95 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95 2024-11-11T12:44:11,791 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95, entries=150, sequenceid=133, filesize=11.9 K 2024-11-11T12:44:11,792 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/0b446eca86564fa492cbbe91b2499025 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025 2024-11-11T12:44:11,795 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025, entries=150, sequenceid=133, filesize=11.9 K 2024-11-11T12:44:11,796 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cc6fe7bde4d6aa548700eb200610e776 in 867ms, sequenceid=133, compaction requested=true 2024-11-11T12:44:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:11,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=164}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=164 2024-11-11T12:44:11,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=164 2024-11-11T12:44:11,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=163 2024-11-11T12:44:11,798 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.6320 sec 2024-11-11T12:44:11,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=163, table=TestAcidGuarantees in 1.6360 sec 2024-11-11T12:44:11,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:11,819 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=140.89 KB heapSize=369.89 KB 2024-11-11T12:44:11,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:11,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:11,819 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK 
cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:11,820 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:11,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 84 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329111826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,829 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 74 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329111826, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 65 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329111827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,830 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329111827, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,831 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329111828, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,845 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d19584a608d14c08840d1459c5ed5408_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:11,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742437_1613 (size=12304) 2024-11-11T12:44:11,934 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 86 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329111932, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 67 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329111933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 76 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329111933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,935 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,935 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329111933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:11,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 75 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329111933, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,137 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,137 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 88 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329112135, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329112136, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 78 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329112137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,138 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329112137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,139 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329112137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,256 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:12,269 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111d19584a608d14c08840d1459c5ed5408_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d19584a608d14c08840d1459c5ed5408_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:12,270 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/61adcc8e003e4506977ce9862916223f, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:12,271 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/61adcc8e003e4506977ce9862916223f is 175, key is test_row_0/A:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,272 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=163 2024-11-11T12:44:12,276 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, 
procId: 163 completed 2024-11-11T12:44:12,280 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:12,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees 2024-11-11T12:44:12,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:12,282 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:12,283 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=165, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:12,283 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:12,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742438_1614 (size=31105) 2024-11-11T12:44:12,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:12,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,435 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-11T12:44:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,439 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329112439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,441 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329112439, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 90 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329112440, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 73 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329112441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,442 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 80 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329112441, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:12,587 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,588 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-11T12:44:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,588 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,588 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,695 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=158, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/61adcc8e003e4506977ce9862916223f 2024-11-11T12:44:12,718 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e50c53862b034ba5b02ba76c90a152d6 is 50, key is test_row_0/B:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742439_1615 (size=12151) 2024-11-11T12:44:12,740 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e50c53862b034ba5b02ba76c90a152d6 2024-11-11T12:44:12,742 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,743 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-11T12:44:12,743 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
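The flush above runs through mob.DefaultMobStoreFlusher and renames its output under the mobdir tree, so column family A of TestAcidGuarantees is MOB-enabled in this test. A minimal sketch of declaring such a family with the HBase 2.x Java client follows; the MOB threshold value and the table-creation wrapper are illustrative assumptions, not taken from the test itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                // Family "A" stores cells above the MOB threshold as MOB files under
                // the mobdir tree, which is why the flush above renames a file from
                // mobdir/.tmp into mobdir/data.
                TableDescriptorBuilder table =
                    TableDescriptorBuilder.newBuilder(TableName.valueOf("TestAcidGuarantees"))
                        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("A"))
                            .setMobEnabled(true)
                            .setMobThreshold(100L) // assumed threshold; not the value used by this run
                            .build());
                admin.createTable(table.build());
            }
        }
    }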
2024-11-11T12:44:12,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:12,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,744 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] handler.RSProcedureHandler(58): pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:12,744 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=166 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
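The repeated RegionTooBusyException warnings in this section all originate in HRegion.checkResources: writes to cc6fe7bde4d6aa548700eb200610e776 are rejected while the region's memstore sits above its blocking limit (reported here as 512.0 K), and the parallel flush requests log "NOT flushing ... as already flushing" until the in-flight flush drains it. The blocking limit is the memstore flush size multiplied by the block multiplier. The sketch below only illustrates that relationship with assumed, test-sized values; it is not the configuration this run actually used.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Assumed values chosen so that flushSize * multiplier = 512 K,
            // matching the "Over memstore limit=512.0 K" messages above.
            conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
            conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", -1);
            int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", -1);
            // Puts to a region fail with RegionTooBusyException once its memstore
            // exceeds this blocking size, until a flush brings it back down.
            System.out.println("blocking memstore size = " + (flushSize * multiplier) + " bytes");
        }
    }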
2024-11-11T12:44:12,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=166 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
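Note on the pid=166 failure above: the region refuses the remote flush ("NOT flushing ... as already flushing") because a MemStoreFlusher-driven flush is still running, the callable surfaces that as an IOException, and the master logs "Remote procedure failed, pid=166" and will re-dispatch. A minimal, self-contained sketch of that pattern follows; all names are hypothetical stand-ins, not the actual FlushRegionCallable/HRegion classes.

import java.io.IOException;
import java.util.concurrent.atomic.AtomicBoolean;

final class FlushRegionSketch {
    static final class Region {
        final String name;
        final AtomicBoolean flushing = new AtomicBoolean(false);
        Region(String name) { this.name = name; }

        /** Returns false when another flush already holds the region (log: "NOT flushing ... as already flushing"). */
        boolean requestFlush() {
            return flushing.compareAndSet(false, true);
        }
    }

    /** Stand-in for the region-server side of the flush procedure. */
    static void runFlushProcedure(Region region, long pid) throws IOException {
        if (!region.requestFlush()) {
            // The real callable wraps this condition in an IOException that the
            // procedure framework reports back to the master as a remote failure.
            throw new IOException("Unable to complete flush " + region.name + " (pid=" + pid + ")");
        }
        try {
            // ... write memstore snapshots to .tmp files, then commit them into the store ...
        } finally {
            region.flushing.set(false);
        }
    }

    public static void main(String[] args) {
        Region r = new Region("TestAcidGuarantees,,cc6fe7bde4d6aa548700eb200610e776");
        r.flushing.set(true); // a MemStoreFlusher-triggered flush is still in progress
        try {
            runFlushProcedure(r, 166);
        } catch (IOException e) {
            // The master side would log "Remote procedure failed, pid=166" and reschedule.
            System.out.println("Remote procedure failed: " + e.getMessage());
        }
    }
}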
2024-11-11T12:44:12,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/8f0ff88ef2dc45ff952621cb7a0be07e is 50, key is test_row_0/C:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742440_1616 (size=12151) 2024-11-11T12:44:12,800 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=158 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/8f0ff88ef2dc45ff952621cb7a0be07e 2024-11-11T12:44:12,847 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/61adcc8e003e4506977ce9862916223f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f 2024-11-11T12:44:12,874 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f, entries=150, sequenceid=158, filesize=30.4 K 2024-11-11T12:44:12,874 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e50c53862b034ba5b02ba76c90a152d6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6 2024-11-11T12:44:12,879 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6, entries=150, sequenceid=158, filesize=11.9 K 2024-11-11T12:44:12,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/8f0ff88ef2dc45ff952621cb7a0be07e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e 2024-11-11T12:44:12,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:12,886 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e, entries=150, sequenceid=158, filesize=11.9 K 2024-11-11T12:44:12,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): 
Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for cc6fe7bde4d6aa548700eb200610e776 in 1068ms, sequenceid=158, compaction requested=true 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:12,887 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:12,887 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:12,887 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:12,888 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:12,888 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 124326 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:12,888 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:12,888 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:12,888 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,888 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:12,888 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbcdac28ab014e41aa95516f035e22cb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=47.4 K 2024-11-11T12:44:12,889 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=121.4 K 2024-11-11T12:44:12,889 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,889 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f] 2024-11-11T12:44:12,889 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bbcdac28ab014e41aa95516f035e22cb, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:12,889 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 05f7ca2c9119448986cb23fda57e632a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:12,889 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ae74c2c4d87444e9bce4955f2385667a, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1731329048449 2024-11-11T12:44:12,890 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 7f06247121464cd0b071fd8ab6817e21, keycount=150, bloomtype=ROW, size=30.2 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1731329048449 2024-11-11T12:44:12,890 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 793617b77b8944cd943fe68eefb2ff95, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1731329049576 2024-11-11T12:44:12,890 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 239cfe4378d7483fb4b24f785614df7a, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=133, earliestPutTs=1731329049576 2024-11-11T12:44:12,890 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting e50c53862b034ba5b02ba76c90a152d6, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:12,891 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 61adcc8e003e4506977ce9862916223f, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:12,896 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,896 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=166 2024-11-11T12:44:12,896 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:12,897 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=60.38 KB heapSize=158.95 KB 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:12,897 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:12,909 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:12,914 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#520 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:12,914 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/a9a287fc98d344009c41de6b99aa9414 is 50, key is test_row_0/B:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,915 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411118c111e5583c7418f860c4ed9978e0347_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329051825/Put/seqid=0 2024-11-11T12:44:12,917 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111f3920b501cba4ed49402ed581230026b_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:12,919 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111f3920b501cba4ed49402ed581230026b_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:12,920 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111f3920b501cba4ed49402ed581230026b_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:12,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742441_1617 (size=12493) 2024-11-11T12:44:12,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742442_1618 (size=12304) 2024-11-11T12:44:12,927 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/a9a287fc98d344009c41de6b99aa9414 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a9a287fc98d344009c41de6b99aa9414 2024-11-11T12:44:12,932 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into a9a287fc98d344009c41de6b99aa9414(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
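The ExploringCompactionPolicy selections above ("selected 4 files of size 48510 ... after considering 3 permutations with 3 in ratio") boil down to scanning contiguous runs of store files and keeping a run in which no single file dwarfs the rest. The sketch below is a deliberately simplified illustration of that "files in ratio" idea only; the real policy also weighs file count, size limits, and off-peak ratios, and this code is not the HBase implementation.

import java.util.List;

final class CompactionSelectionSketch {
    /** True when every file is at most `ratio` times the combined size of the other files. */
    static boolean filesInRatio(List<Long> sizes, double ratio) {
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > (total - size) * ratio) {
                return false;
            }
        }
        return true;
    }

    /** Scan contiguous runs of at least `minFiles` files and keep the largest run that stays in ratio. */
    static List<Long> select(List<Long> sizes, int minFiles, double ratio) {
        List<Long> best = List.of();
        long bestTotal = -1;
        for (int start = 0; start < sizes.size(); start++) {
            for (int end = start + minFiles; end <= sizes.size(); end++) {
                List<Long> candidate = sizes.subList(start, end);
                long total = candidate.stream().mapToLong(Long::longValue).sum();
                if (filesInRatio(candidate, ratio) && total > bestTotal) {
                    best = candidate;
                    bestTotal = total;
                }
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // Illustrative sizes roughly like the ~12 K B-store files above (values are made up).
        List<Long> sizes = List.of(12151L, 11985L, 12223L, 12151L);
        System.out.println("selected=" + select(sizes, 3, 1.2));
    }
}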
2024-11-11T12:44:12,932 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:12,932 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=12, startTime=1731329052887; duration=0sec 2024-11-11T12:44:12,932 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:12,932 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:12,932 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:12,935 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48510 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:12,935 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:12,935 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:12,935 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d805ec9d46a441089222539ed0e3493, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=47.4 K 2024-11-11T12:44:12,935 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d805ec9d46a441089222539ed0e3493, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=94, earliestPutTs=1731329048108 2024-11-11T12:44:12,936 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a1e47bcdb0d94c00917bdba4ac0ec280, keycount=150, bloomtype=ROW, size=11.7 K, encoding=NONE, compression=NONE, seqNum=120, earliestPutTs=1731329048449 2024-11-11T12:44:12,936 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0b446eca86564fa492cbbe91b2499025, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, 
compression=NONE, seqNum=133, earliestPutTs=1731329049576 2024-11-11T12:44:12,936 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 8f0ff88ef2dc45ff952621cb7a0be07e, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:12,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742443_1619 (size=4469) 2024-11-11T12:44:12,939 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#519 average throughput is 0.81 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:12,940 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/d0bd41310fa94f7684538ce4fd70f9d0 is 175, key is test_row_0/A:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742444_1620 (size=31447) 2024-11-11T12:44:12,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:12,946 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:12,947 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#522 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:12,947 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/c72ebf370ced4b26836fddca49cd9752 is 50, key is test_row_0/C:col10/1731329051711/Put/seqid=0 2024-11-11T12:44:12,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742445_1621 (size=12493) 2024-11-11T12:44:12,952 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/d0bd41310fa94f7684538ce4fd70f9d0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0 2024-11-11T12:44:12,955 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/c72ebf370ced4b26836fddca49cd9752 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c72ebf370ced4b26836fddca49cd9752 2024-11-11T12:44:12,958 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into d0bd41310fa94f7684538ce4fd70f9d0(size=30.7 K), total size for store is 30.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:12,958 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:12,958 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=12, startTime=1731329052887; duration=0sec 2024-11-11T12:44:12,958 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:12,958 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:12,963 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into c72ebf370ced4b26836fddca49cd9752(size=12.2 K), total size for store is 12.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
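The repeated RegionTooBusyException warnings that follow are the region pushing back while its memstore is over the blocking limit ("Over memstore limit=512.0 K", a deliberately small limit in this test): writes are rejected until the in-flight flush frees memory. The stock HBase client normally retries these internally (governed by settings such as hbase.client.retries.number and hbase.client.pause); the plain-Java sketch below, with hypothetical names, just makes that retry-with-backoff pattern explicit.

import java.io.IOException;
import java.util.concurrent.Callable;

final class RetryOnTooBusySketch {
    /** Retry `write` with exponential backoff while it fails with an IOException
     *  such as RegionTooBusyException. */
    static <T> T withBackoff(Callable<T> write, int maxAttempts, long initialPauseMs)
            throws Exception {
        long pause = initialPauseMs;
        IOException last = null;
        for (int attempt = 1; attempt <= maxAttempts; attempt++) {
            try {
                return write.call();
            } catch (IOException e) {          // e.g. RegionTooBusyException
                last = e;
                Thread.sleep(pause);           // give the memstore flush time to catch up
                pause = Math.min(pause * 2, 10_000);
            }
        }
        throw last;
    }

    public static void main(String[] args) throws Exception {
        // Stand-in for a put that is rejected twice while the region is too busy, then succeeds.
        int[] calls = {0};
        String result = withBackoff(() -> {
            if (calls[0]++ < 2) {
                throw new IOException("RegionTooBusyException: Over memstore limit=512.0 K");
            }
            return "put acknowledged";
        }, 5, 100);
        System.out.println(result);
    }
}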
2024-11-11T12:44:12,963 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:12,963 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=12, startTime=1731329052887; duration=0sec 2024-11-11T12:44:12,964 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:12,964 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:12,971 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,971 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 96 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329112967, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329112968, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 77 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329112969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,971 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329112969, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:12,973 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:12,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329112971, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329113072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,073 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329113072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,073 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 79 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329113072, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,074 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329113073, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,075 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329113074, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,276 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329113275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329113275, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,277 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 81 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329113276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329113276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,278 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 89 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329113276, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,323 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:13,327 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411118c111e5583c7418f860c4ed9978e0347_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118c111e5583c7418f860c4ed9978e0347_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:13,332 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a1c52058ca045059b6e347791dbc0f8, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:13,333 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a1c52058ca045059b6e347791dbc0f8 is 175, key is test_row_0/A:col10/1731329051825/Put/seqid=0 2024-11-11T12:44:13,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742446_1622 (size=31105) 2024-11-11T12:44:13,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:13,581 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329113580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,582 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329113580, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329113588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,588 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329113588, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,589 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:13,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 83 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329113589, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:13,753 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=170, memsize=20.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a1c52058ca045059b6e347791dbc0f8 2024-11-11T12:44:13,773 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/f04e418476364200bff91955c3ab27c8 is 50, key is test_row_0/B:col10/1731329051825/Put/seqid=0 2024-11-11T12:44:13,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742447_1623 (size=12151) 2024-11-11T12:44:14,091 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:14,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 87 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329114090, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:14,092 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:14,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 85 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329114092, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:14,095 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:14,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329114093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:14,096 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:14,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 93 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329114096, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:14,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:14,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 95 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329114100, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:14,204 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/f04e418476364200bff91955c3ab27c8 2024-11-11T12:44:14,211 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cb1eb291e80f4b5bab5518111ce7ac3f is 50, key is test_row_0/C:col10/1731329051825/Put/seqid=0 2024-11-11T12:44:14,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742448_1624 (size=12151) 2024-11-11T12:44:14,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:14,660 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=20.13 KB at sequenceid=170 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cb1eb291e80f4b5bab5518111ce7ac3f 2024-11-11T12:44:14,676 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a1c52058ca045059b6e347791dbc0f8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8 2024-11-11T12:44:14,679 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8, entries=150, sequenceid=170, filesize=30.4 K 2024-11-11T12:44:14,688 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/f04e418476364200bff91955c3ab27c8 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8 2024-11-11T12:44:14,694 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8, entries=150, sequenceid=170, filesize=11.9 K 2024-11-11T12:44:14,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cb1eb291e80f4b5bab5518111ce7ac3f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f 2024-11-11T12:44:14,703 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f, entries=150, sequenceid=170, filesize=11.9 K 2024-11-11T12:44:14,705 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(3040): Finished flush of dataSize ~60.38 KB/61830, heapSize ~158.91 KB/162720, currentSize=140.89 KB/144270 for cc6fe7bde4d6aa548700eb200610e776 in 1809ms, sequenceid=170, compaction requested=false 2024-11-11T12:44:14,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:14,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:14,705 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=166}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=166 2024-11-11T12:44:14,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=166 2024-11-11T12:44:14,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=165 2024-11-11T12:44:14,711 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.4270 sec 2024-11-11T12:44:14,713 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=165, table=TestAcidGuarantees in 2.4320 sec 2024-11-11T12:44:15,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,095 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:15,096 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,103 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411114d38fbc8afa240d093c0d52ded8606c7_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115106, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 91 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 92 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,109 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 97 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329115108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742449_1625 (size=12304) 2024-11-11T12:44:15,115 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:15,118 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411114d38fbc8afa240d093c0d52ded8606c7_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d38fbc8afa240d093c0d52ded8606c7_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,119 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/47dd6793c8d64f14a37fb426e2e8cbcb, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,120 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/47dd6793c8d64f14a37fb426e2e8cbcb is 175, key is test_row_0/A:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is 
added to blk_1073742450_1626 (size=31105) 2024-11-11T12:44:15,134 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=198, memsize=49.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/47dd6793c8d64f14a37fb426e2e8cbcb 2024-11-11T12:44:15,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9a3d862214ab469ea102609ee95114df is 50, key is test_row_0/B:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742451_1627 (size=12151) 2024-11-11T12:44:15,150 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9a3d862214ab469ea102609ee95114df 2024-11-11T12:44:15,158 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/08dbd748b75a44e98028212f6fafdbf7 is 50, key is test_row_0/C:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742452_1628 (size=12151) 2024-11-11T12:44:15,189 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=198 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/08dbd748b75a44e98028212f6fafdbf7 2024-11-11T12:44:15,194 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/47dd6793c8d64f14a37fb426e2e8cbcb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb 2024-11-11T12:44:15,198 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb, entries=150, sequenceid=198, filesize=30.4 K 2024-11-11T12:44:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreScanner(1000): StoreScanner already closing. 
There is no need to updateReaders 2024-11-11T12:44:15,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9a3d862214ab469ea102609ee95114df as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df 2024-11-11T12:44:15,203 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df, entries=150, sequenceid=198, filesize=11.9 K 2024-11-11T12:44:15,204 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/08dbd748b75a44e98028212f6fafdbf7 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7 2024-11-11T12:44:15,207 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7, entries=150, sequenceid=198, filesize=11.9 K 2024-11-11T12:44:15,214 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=60.38 KB/61830 for cc6fe7bde4d6aa548700eb200610e776 in 118ms, sequenceid=198, compaction requested=true 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:15,214 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:15,214 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 
2024-11-11T12:44:15,214 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:15,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,215 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 93657 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:15,215 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:15,215 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:15,215 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=91.5 K 2024-11-11T12:44:15,215 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:15,215 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb] 2024-11-11T12:44:15,216 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=80.51 KB heapSize=211.69 KB 2024-11-11T12:44:15,216 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting d0bd41310fa94f7684538ce4fd70f9d0, keycount=150, bloomtype=ROW, size=30.7 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:15,216 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36795 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:15,216 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:15,216 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:15,216 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a9a287fc98d344009c41de6b99aa9414, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=35.9 K 2024-11-11T12:44:15,216 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:15,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:15,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:15,217 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,217 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
5a1c52058ca045059b6e347791dbc0f8, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731329051825 2024-11-11T12:44:15,217 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting a9a287fc98d344009c41de6b99aa9414, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:15,217 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 47dd6793c8d64f14a37fb426e2e8cbcb, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731329052967 2024-11-11T12:44:15,217 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f04e418476364200bff91955c3ab27c8, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731329051825 2024-11-11T12:44:15,218 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9a3d862214ab469ea102609ee95114df, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731329052967 2024-11-11T12:44:15,224 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e2a7f0d2196b4d1bb2212515e5578660_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329055215/Put/seqid=0 2024-11-11T12:44:15,233 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,234 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#530 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:15,235 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/0ba040f816804f498c53801988e44f8c is 50, key is test_row_0/B:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,244 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111a5bc5cf71aa74b61a365fdc6ba333808_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,245 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111a5bc5cf71aa74b61a365fdc6ba333808_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,245 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a5bc5cf71aa74b61a365fdc6ba333808_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742455_1631 (size=4469) 2024-11-11T12:44:15,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742454_1630 (size=12595) 2024-11-11T12:44:15,284 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 98 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115242, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,289 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,290 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 105 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115285, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742453_1629 (size=14794) 2024-11-11T12:44:15,295 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:15,299 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e2a7f0d2196b4d1bb2212515e5578660_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e2a7f0d2196b4d1bb2212515e5578660_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,303 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/ba56e1ff3ab44267877d80d6c94af68f, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,304 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/ba56e1ff3ab44267877d80d6c94af68f is 175, key is test_row_0/A:col10/1731329055215/Put/seqid=0 2024-11-11T12:44:15,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742456_1632 (size=39749) 2024-11-11T12:44:15,320 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=214, memsize=29.1 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/ba56e1ff3ab44267877d80d6c94af68f 2024-11-11T12:44:15,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9d74ab3bf71e43889ee6db706a5c274a is 50, key is test_row_0/B:col10/1731329055215/Put/seqid=0 2024-11-11T12:44:15,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742457_1633 (size=12151) 2024-11-11T12:44:15,362 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9d74ab3bf71e43889ee6db706a5c274a 2024-11-11T12:44:15,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a is 50, key is test_row_0/C:col10/1731329055215/Put/seqid=0 2024-11-11T12:44:15,396 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 100 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115392, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 116 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,397 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,397 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115394, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742458_1634 (size=12151) 2024-11-11T12:44:15,412 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=29.07 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a 2024-11-11T12:44:15,421 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/ba56e1ff3ab44267877d80d6c94af68f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f 2024-11-11T12:44:15,428 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f, entries=200, sequenceid=214, filesize=38.8 K 2024-11-11T12:44:15,429 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/9d74ab3bf71e43889ee6db706a5c274a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a 2024-11-11T12:44:15,436 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a, entries=150, sequenceid=214, filesize=11.9 K 2024-11-11T12:44:15,437 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a 2024-11-11T12:44:15,441 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a, entries=150, sequenceid=214, filesize=11.9 K 2024-11-11T12:44:15,442 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~87.22 KB/89310, heapSize ~229.22 KB/234720, currentSize=120.76 KB/123660 for cc6fe7bde4d6aa548700eb200610e776 in 226ms, sequenceid=214, compaction requested=true 2024-11-11T12:44:15,442 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=3), splitQueue=0 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:15,443 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-11T12:44:15,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,601 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=127.47 KB heapSize=334.73 KB 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:15,602 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:15,614 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a53b3b31e5344f4dacabffa6d69adb9e_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:15,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 107 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,618 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 104 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115615, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,620 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115617, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,622 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 121 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742459_1635 (size=14794) 2024-11-11T12:44:15,643 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:15,679 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111a53b3b31e5344f4dacabffa6d69adb9e_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a53b3b31e5344f4dacabffa6d69adb9e_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:15,684 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2d5b986cf3444fd28f85f0818cbcd247, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:15,685 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2d5b986cf3444fd28f85f0818cbcd247 is 175, key is test_row_0/A:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:15,688 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#529 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:15,689 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/a7d1b6ede9d5482fb16d2e041bdbc789 is 175, key is test_row_0/A:col10/1731329052967/Put/seqid=0 2024-11-11T12:44:15,701 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/0ba040f816804f498c53801988e44f8c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/0ba040f816804f498c53801988e44f8c 2024-11-11T12:44:15,707 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into 0ba040f816804f498c53801988e44f8c(size=12.3 K), total size for store is 24.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:15,707 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:15,708 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=13, startTime=1731329055214; duration=0sec 2024-11-11T12:44:15,708 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=4), splitQueue=0 2024-11-11T12:44:15,708 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:15,708 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 3 compacting, 1 eligible, 16 blocking 2024-11-11T12:44:15,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:44:15,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:44:15,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
because compaction request was cancelled 2024-11-11T12:44:15,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:15,712 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:15,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 109 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115719, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,726 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 48946 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:15,726 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:15,726 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:15,726 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c72ebf370ced4b26836fddca49cd9752, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=47.8 K 2024-11-11T12:44:15,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742460_1636 (size=39749) 2024-11-11T12:44:15,727 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 115 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115725, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,727 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=237, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2d5b986cf3444fd28f85f0818cbcd247 2024-11-11T12:44:15,730 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 106 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115726, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,731 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting c72ebf370ced4b26836fddca49cd9752, keycount=150, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=158, earliestPutTs=1731329051711 2024-11-11T12:44:15,733 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting cb1eb291e80f4b5bab5518111ce7ac3f, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=170, earliestPutTs=1731329051825 2024-11-11T12:44:15,734 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 08dbd748b75a44e98028212f6fafdbf7, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731329052967 2024-11-11T12:44:15,735 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3d2f1dfab6d1473ea5b35bf54ab0ed1a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731329055104 2024-11-11T12:44:15,738 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 123 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115733, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,760 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#534 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:15,761 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cb99d435689a4d3c8deda76e2f119912 is 50, key is test_row_0/C:col10/1731329055215/Put/seqid=0 2024-11-11T12:44:15,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742461_1637 (size=31549) 2024-11-11T12:44:15,768 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/aa8b38c5e2694d90a788a430e6c67b7b is 50, key is test_row_0/B:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:15,777 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/a7d1b6ede9d5482fb16d2e041bdbc789 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789 2024-11-11T12:44:15,782 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into a7d1b6ede9d5482fb16d2e041bdbc789(size=30.8 K), total size for store is 69.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:15,782 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=13, startTime=1731329055214; duration=0sec 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 4 compacting, 0 eligible, 16 blocking 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. because compaction request was cancelled 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:15,782 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:44:15,784 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:44:15,784 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:44:15,784 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
because compaction request was cancelled 2024-11-11T12:44:15,784 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:15,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742462_1638 (size=12629) 2024-11-11T12:44:15,824 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cb99d435689a4d3c8deda76e2f119912 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb99d435689a4d3c8deda76e2f119912 2024-11-11T12:44:15,844 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into cb99d435689a4d3c8deda76e2f119912(size=12.3 K), total size for store is 12.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:15,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:15,844 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=12, startTime=1731329055443; duration=0sec 2024-11-11T12:44:15,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:15,844 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:15,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742463_1639 (size=12151) 2024-11-11T12:44:15,853 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/aa8b38c5e2694d90a788a430e6c67b7b 2024-11-11T12:44:15,886 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3e2d70789552433193b614f7209edf10 is 50, key is test_row_0/C:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:15,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742464_1640 (size=12151) 2024-11-11T12:44:15,933 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 111 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329115930, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 117 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329115934, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,937 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329115936, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:15,946 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:15,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329115944, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,239 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 113 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329116237, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,242 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,242 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329116240, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,242 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 119 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329116241, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,251 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,252 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329116249, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,323 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=237 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3e2d70789552433193b614f7209edf10 2024-11-11T12:44:16,327 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2d5b986cf3444fd28f85f0818cbcd247 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247 2024-11-11T12:44:16,331 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247, entries=200, sequenceid=237, filesize=38.8 K 2024-11-11T12:44:16,332 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/aa8b38c5e2694d90a788a430e6c67b7b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b 2024-11-11T12:44:16,335 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b, entries=150, sequenceid=237, filesize=11.9 K 2024-11-11T12:44:16,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3e2d70789552433193b614f7209edf10 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10 2024-11-11T12:44:16,342 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10, entries=150, sequenceid=237, filesize=11.9 K 2024-11-11T12:44:16,343 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=67.09 KB/68700 for cc6fe7bde4d6aa548700eb200610e776 in 742ms, sequenceid=237, compaction requested=true 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:16,343 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:16,343 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:16,343 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:16,352 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 111047 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:16,352 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:16,352 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:16,352 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=108.4 K 2024-11-11T12:44:16,352 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:16,352 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247] 2024-11-11T12:44:16,353 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 36897 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:16,353 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:16,353 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:16,353 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/0ba040f816804f498c53801988e44f8c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=36.0 K 2024-11-11T12:44:16,356 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting a7d1b6ede9d5482fb16d2e041bdbc789, keycount=150, bloomtype=ROW, size=30.8 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731329052967 2024-11-11T12:44:16,357 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 0ba040f816804f498c53801988e44f8c, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=198, earliestPutTs=1731329052967 2024-11-11T12:44:16,360 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting ba56e1ff3ab44267877d80d6c94af68f, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731329055104 2024-11-11T12:44:16,360 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 9d74ab3bf71e43889ee6db706a5c274a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731329055104 2024-11-11T12:44:16,360 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2d5b986cf3444fd28f85f0818cbcd247, keycount=200, bloomtype=ROW, size=38.8 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329055242 2024-11-11T12:44:16,361 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting aa8b38c5e2694d90a788a430e6c67b7b, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329055247 2024-11-11T12:44:16,373 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#537 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:16,374 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/786f726d3edc46b29dbf8103b68928d2 is 50, key is test_row_0/B:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:16,380 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:16,386 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e202411110921987c1c6e41439cc76f7d7dc62b22_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:16,387 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=165 2024-11-11T12:44:16,387 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 165 completed 2024-11-11T12:44:16,388 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e202411110921987c1c6e41439cc76f7d7dc62b22_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:16,388 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110921987c1c6e41439cc76f7d7dc62b22_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:16,389 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:16,390 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees 2024-11-11T12:44:16,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-11T12:44:16,396 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:16,397 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=167, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:16,397 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; 
org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:16,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742465_1641 (size=12697) 2024-11-11T12:44:16,412 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/786f726d3edc46b29dbf8103b68928d2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/786f726d3edc46b29dbf8103b68928d2 2024-11-11T12:44:16,420 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into 786f726d3edc46b29dbf8103b68928d2(size=12.4 K), total size for store is 12.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:16,420 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:16,420 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=13, startTime=1731329056343; duration=0sec 2024-11-11T12:44:16,420 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:16,420 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:16,420 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 2 store files, 0 compacting, 2 eligible, 16 blocking 2024-11-11T12:44:16,421 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 0 files of size 0 starting at candidate #-1 after considering 0 permutations with 0 in ratio 2024-11-11T12:44:16,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742466_1642 (size=4469) 2024-11-11T12:44:16,421 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(232): Not compacting files because we only have 0 files ready for compaction. Need 3 to initiate. 2024-11-11T12:44:16,421 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit(450): Not compacting TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
because compaction request was cancelled 2024-11-11T12:44:16,421 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-11T12:44:16,550 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,556 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=168 2024-11-11T12:44:16,556 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:16,556 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=67.09 KB heapSize=176.53 KB 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:16,557 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:16,575 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ebd8232a67d14529b8d28ec61b9e71b6_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329055617/Put/seqid=0 2024-11-11T12:44:16,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742467_1643 (size=12304) 2024-11-11T12:44:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-11T12:44:16,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:16,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:16,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329116771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,774 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 125 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329116771, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,776 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329116773, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,776 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 132 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329116774, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,832 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#538 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:16,833 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5e409630c1354b9ea81d8c256a920003 is 175, key is test_row_0/A:col10/1731329055247/Put/seqid=0 2024-11-11T12:44:16,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742468_1644 (size=31651) 2024-11-11T12:44:16,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 127 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329116875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,877 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,878 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329116875, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329116878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,879 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:16,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329116878, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:16,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-11T12:44:17,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,009 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111ebd8232a67d14529b8d28ec61b9e71b6_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ebd8232a67d14529b8d28ec61b9e71b6_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:17,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/70963d4f757047ec8f3e2e52b21408de, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:17,010 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/70963d4f757047ec8f3e2e52b21408de is 175, key is test_row_0/A:col10/1731329055617/Put/seqid=0 2024-11-11T12:44:17,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742469_1645 (size=31105) 2024-11-11T12:44:17,082 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329117080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,082 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 126 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329117080, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 136 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329117081, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,083 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 129 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329117082, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,129 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 99 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329117127, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,130 DEBUG [Thread-2549 {}] client.RpcRetryingCallerImpl(129): Call exception, tries=6, retries=16, started=4158 ms ago, cancelled=false, msg=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) , details=row 'test_row_1' on table 'TestAcidGuarantees' at region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., hostname=32e78532c8b1,44673,1731328897232, seqNum=5, see https://s.apache.org/timeout, exception=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at jdk.internal.reflect.GeneratedConstructorAccessor40.newInstance(Unknown Source) at 
java.base/jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) at java.base/java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) at java.base/java.lang.reflect.Constructor.newInstance(Constructor.java:480) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.makeIOExceptionOfException(ProtobufUtil.java:280) at org.apache.hadoop.hbase.protobuf.ProtobufUtil.handleRemoteException(ProtobufUtil.java:265) at org.apache.hadoop.hbase.client.RegionServerCallable.call(RegionServerCallable.java:133) at org.apache.hadoop.hbase.client.RpcRetryingCallerImpl.callWithRetries(RpcRetryingCallerImpl.java:104) at org.apache.hadoop.hbase.client.HTable.lambda$put$3(HTable.java:578) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.client.HTable.put(HTable.java:565) at org.apache.hadoop.hbase.AcidGuaranteesTestTool$AtomicityWriter.doAnAction(AcidGuaranteesTestTool.java:169) at org.apache.hadoop.hbase.MultithreadedTestUtil$RepeatingTestThread.doWork(MultithreadedTestUtil.java:149) at org.apache.hadoop.hbase.MultithreadedTestUtil$TestThread.run(MultithreadedTestUtil.java:123) Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException(org.apache.hadoop.hbase.RegionTooBusyException): org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.access$100(AbstractRpcClient.java:94) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:116) at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:131) at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at 
org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) at org.apache.hbase.thirdparty.io.netty.channel.nio.AbstractNioByteChannel$NioByteUnsafe.read(AbstractNioByteChannel.java:166) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKey(NioEventLoop.java:788) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeysOptimized(NioEventLoop.java:724) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.processSelectedKeys(NioEventLoop.java:650) at org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:562) at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-11T12:44:17,267 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5e409630c1354b9ea81d8c256a920003 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003 2024-11-11T12:44:17,271 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in 
cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 5e409630c1354b9ea81d8c256a920003(size=30.9 K), total size for store is 30.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:17,272 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:17,272 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=13, startTime=1731329056343; duration=0sec 2024-11-11T12:44:17,272 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:17,272 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:17,385 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329117384, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 138 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329117385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,386 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 131 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329117385, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,389 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,389 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329117386, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,420 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=254, memsize=22.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/70963d4f757047ec8f3e2e52b21408de 2024-11-11T12:44:17,428 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e5311a9ad3004c1198e31469ae317807 is 50, key is test_row_0/B:col10/1731329055617/Put/seqid=0 2024-11-11T12:44:17,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742470_1646 (size=12151) 2024-11-11T12:44:17,436 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e5311a9ad3004c1198e31469ae317807 2024-11-11T12:44:17,448 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/2babbc458c114cdcb5dfd9014b1b137a is 50, key is 
test_row_0/C:col10/1731329055617/Put/seqid=0 2024-11-11T12:44:17,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742471_1647 (size=12151) 2024-11-11T12:44:17,455 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.36 KB at sequenceid=254 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/2babbc458c114cdcb5dfd9014b1b137a 2024-11-11T12:44:17,460 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/70963d4f757047ec8f3e2e52b21408de as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de 2024-11-11T12:44:17,465 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de, entries=150, sequenceid=254, filesize=30.4 K 2024-11-11T12:44:17,466 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e5311a9ad3004c1198e31469ae317807 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807 2024-11-11T12:44:17,470 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807, entries=150, sequenceid=254, filesize=11.9 K 2024-11-11T12:44:17,471 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/2babbc458c114cdcb5dfd9014b1b137a as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a 2024-11-11T12:44:17,476 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a, entries=150, sequenceid=254, filesize=11.9 K 2024-11-11T12:44:17,478 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 
{event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(3040): Finished flush of dataSize ~67.09 KB/68700, heapSize ~176.48 KB/180720, currentSize=134.18 KB/137400 for cc6fe7bde4d6aa548700eb200610e776 in 922ms, sequenceid=254, compaction requested=true 2024-11-11T12:44:17,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:17,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:17,478 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-11-11T12:44:17,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-11-11T12:44:17,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-11-11T12:44:17,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.0830 sec 2024-11-11T12:44:17,488 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=167, table=TestAcidGuarantees in 1.0930 sec 2024-11-11T12:44:17,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-11-11T12:44:17,505 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 167 completed 2024-11-11T12:44:17,508 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:17,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees 2024-11-11T12:44:17,510 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:17,510 INFO [PEWorker-3 {}] procedure.FlushTableProcedure(91): pid=169, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:17,510 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=170, ppid=169, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:17,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-11T12:44:17,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-11T12:44:17,663 DEBUG 
[RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=170 2024-11-11T12:44:17,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:17,668 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=134.18 KB heapSize=352.31 KB 2024-11-11T12:44:17,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:17,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,668 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:17,669 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,675 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119d1ccaddbe2c49039e2d12a056fea511_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329056771/Put/seqid=0 2024-11-11T12:44:17,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742472_1648 (size=12454) 2024-11-11T12:44:17,711 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,716 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411119d1ccaddbe2c49039e2d12a056fea511_cc6fe7bde4d6aa548700eb200610e776 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d1ccaddbe2c49039e2d12a056fea511_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:17,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/35dff6efa28c4733a383d2246ae3e229, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:17,717 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/35dff6efa28c4733a383d2246ae3e229 is 175, key is test_row_0/A:col10/1731329056771/Put/seqid=0 2024-11-11T12:44:17,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742473_1649 (size=31255) 2024-11-11T12:44:17,737 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=278, memsize=44.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/35dff6efa28c4733a383d2246ae3e229 2024-11-11T12:44:17,755 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/98656308faa1455ab4ba6462d4eff025 is 50, key is test_row_0/B:col10/1731329056771/Put/seqid=0 2024-11-11T12:44:17,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742474_1650 (size=12301) 2024-11-11T12:44:17,759 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/98656308faa1455ab4ba6462d4eff025 2024-11-11T12:44:17,765 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/354f200c097e45b1b073e7c5978880b9 is 50, key is test_row_0/C:col10/1731329056771/Put/seqid=0 2024-11-11T12:44:17,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742475_1651 (size=12301) 2024-11-11T12:44:17,778 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=44.73 KB 
at sequenceid=278 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/354f200c097e45b1b073e7c5978880b9 2024-11-11T12:44:17,784 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/35dff6efa28c4733a383d2246ae3e229 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229 2024-11-11T12:44:17,795 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229, entries=150, sequenceid=278, filesize=30.5 K 2024-11-11T12:44:17,796 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/98656308faa1455ab4ba6462d4eff025 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025 2024-11-11T12:44:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,797 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,800 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025, entries=150, sequenceid=278, filesize=12.0 K 2024-11-11T12:44:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,800 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,800 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/354f200c097e45b1b073e7c5978880b9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,804 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9, entries=150, sequenceid=278, filesize=12.0 K 2024-11-11T12:44:17,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,805 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(3040): Finished flush of dataSize ~134.18 KB/137400, heapSize ~352.27 KB/360720, currentSize=0 B/0 for 
cc6fe7bde4d6aa548700eb200610e776 in 137ms, sequenceid=278, compaction requested=true 2024-11-11T12:44:17,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:17,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:17,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,805 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=170}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=170 2024-11-11T12:44:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=170 2024-11-11T12:44:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=170, resume processing ppid=169 2024-11-11T12:44:17,808 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, ppid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 296 msec 2024-11-11T12:44:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,809 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=169, table=TestAcidGuarantees in 300 msec 2024-11-11T12:44:17,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,810 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,812 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,813 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=169 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,814 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 169 completed 2024-11-11T12:44:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,816 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees 2024-11-11T12:44:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,817 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute 
state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,817 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:17,818 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=171, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:17,818 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=172, ppid=171, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:17,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,818 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,827 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,828 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,834 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,836 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,843 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-11-11T12:44:17,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}]
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,918 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,919 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,920 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,928 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,949 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB 2024-11-11T12:44:17,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:17,949 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:17,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:17,950 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,955 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,958 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111162e64ce74f2049a280f579698ebe97c9_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:17,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,961 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,964 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,966 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,968 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,969 DEBUG 
[RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,970 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:17,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:17,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:17,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:17,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,970 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:17,970 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:17,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:17,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742476_1652 (size=22618) 2024-11-11T12:44:17,977 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:17,984 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111162e64ce74f2049a280f579698ebe97c9_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111162e64ce74f2049a280f579698ebe97c9_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:17,985 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/aa835828f28c42089904c97744ed4207, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:17,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/aa835828f28c42089904c97744ed4207 is 175, key is test_row_0/A:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:17,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329117987, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,992 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329117990, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,996 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,996 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 137 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329117992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,997 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:17,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 134 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329117992, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:17,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742477_1653 (size=66023) 2024-11-11T12:44:18,000 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=289, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/aa835828f28c42089904c97744ed4207 2024-11-11T12:44:18,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b4df65cdd6c644fa920249f66b3c2598 is 50, key is test_row_0/B:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:18,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742478_1654 (size=12301) 2024-11-11T12:44:18,051 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b4df65cdd6c644fa920249f66b3c2598 2024-11-11T12:44:18,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/95340fc84232420b84762c80c4b7a8c3 is 50, key is test_row_0/C:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:18,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742479_1655 (size=12301) 2024-11-11T12:44:18,076 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/95340fc84232420b84762c80c4b7a8c3 2024-11-11T12:44:18,081 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/aa835828f28c42089904c97744ed4207 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207 2024-11-11T12:44:18,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207, entries=350, sequenceid=289, filesize=64.5 K 2024-11-11T12:44:18,088 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/b4df65cdd6c644fa920249f66b3c2598 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598 2024-11-11T12:44:18,093 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598, entries=150, sequenceid=289, filesize=12.0 K 2024-11-11T12:44:18,094 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/95340fc84232420b84762c80c4b7a8c3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3 2024-11-11T12:44:18,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329118093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,098 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329118093, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,099 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3, entries=150, sequenceid=289, filesize=12.0 K 2024-11-11T12:44:18,100 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,100 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc6fe7bde4d6aa548700eb200610e776 in 151ms, sequenceid=289, compaction requested=true 2024-11-11T12:44:18,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329118097, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:18,100 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:18,100 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:44:18,101 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 5 store files, 0 compacting, 5 eligible, 16 blocking 2024-11-11T12:44:18,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:18,101 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 160034 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:18,102 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:18,102 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,102 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=156.3 K 2024-11-11T12:44:18,102 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,102 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207] 2024-11-11T12:44:18,102 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=154.31 KB heapSize=405.05 KB 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:18,102 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:18,105 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 5 files of size 61533 starting at candidate #0 after considering 6 permutations with 6 in ratio 2024-11-11T12:44:18,105 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 5e409630c1354b9ea81d8c256a920003, keycount=150, bloomtype=ROW, size=30.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329055247 2024-11-11T12:44:18,105 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:18,105 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:18,105 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb99d435689a4d3c8deda76e2f119912, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=60.1 K 2024-11-11T12:44:18,105 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting cb99d435689a4d3c8deda76e2f119912, keycount=150, bloomtype=ROW, size=12.3 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731329055104 2024-11-11T12:44:18,105 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 70963d4f757047ec8f3e2e52b21408de, keycount=150, bloomtype=ROW, size=30.4 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731329055611 2024-11-11T12:44:18,106 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 3e2d70789552433193b614f7209edf10, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329055247 2024-11-11T12:44:18,106 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 35dff6efa28c4733a383d2246ae3e229, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731329056766 2024-11-11T12:44:18,106 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 2babbc458c114cdcb5dfd9014b1b137a, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731329055611 2024-11-11T12:44:18,107 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting aa835828f28c42089904c97744ed4207, keycount=350, bloomtype=ROW, size=64.5 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057913 2024-11-11T12:44:18,108 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 354f200c097e45b1b073e7c5978880b9, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731329056766 2024-11-11T12:44:18,109 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 95340fc84232420b84762c80c4b7a8c3, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057937 2024-11-11T12:44:18,120 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:18,121 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111186f5e4d5bf2c4c7692026fb4d70880d2_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329058100/Put/seqid=0 2024-11-11T12:44:18,124 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,125 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:18,125 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:18,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,126 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,126 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,128 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#550 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:18,128 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/f1f63508abbe4682a1f9ff78b403ff42 is 50, key is test_row_0/C:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:18,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,141 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 144 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329118137, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,145 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e20241111e59fd0c5124a42f8a8a3f948e49474cd_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:18,147 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e20241111e59fd0c5124a42f8a8a3f948e49474cd_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:18,147 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241111e59fd0c5124a42f8a8a3f948e49474cd_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:18,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742480_1656 (size=14994) 2024-11-11T12:44:18,158 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:18,161 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111186f5e4d5bf2c4c7692026fb4d70880d2_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111186f5e4d5bf2c4c7692026fb4d70880d2_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:18,162 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/bffe84d15749455f9183e3edddb274f4, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:18,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/bffe84d15749455f9183e3edddb274f4 is 175, key is test_row_0/A:col10/1731329058100/Put/seqid=0 2024-11-11T12:44:18,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742481_1657 (size=12949) 2024-11-11T12:44:18,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742482_1658 (size=4469) 2024-11-11T12:44:18,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742483_1659 (size=39949) 2024-11-11T12:44:18,245 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 146 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329118243, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,280 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,281 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:18,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,281 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:18,281 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,282 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,302 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 151 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329118299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,302 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329118299, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,303 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329118302, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:18,434 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,434 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:18,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,435 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:18,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,436 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,449 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 148 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329118447, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,600 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,600 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:18,601 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,601 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,606 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 153 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329118604, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329118606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,608 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 147 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329118606, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,612 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/f1f63508abbe4682a1f9ff78b403ff42 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f1f63508abbe4682a1f9ff78b403ff42 2024-11-11T12:44:18,620 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#549 average throughput is 0.05 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:18,620 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/346d0f0fbfc447779bd6cd28ad6d6fcd is 175, key is test_row_0/A:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:18,624 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 5 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into f1f63508abbe4682a1f9ff78b403ff42(size=12.6 K), total size for store is 12.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:18,624 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:18,625 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=11, startTime=1731329058100; duration=0sec 2024-11-11T12:44:18,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:18,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:18,625 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:18,640 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 49450 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:18,640 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:18,640 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,640 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/786f726d3edc46b29dbf8103b68928d2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=48.3 K 2024-11-11T12:44:18,644 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 786f726d3edc46b29dbf8103b68928d2, keycount=150, bloomtype=ROW, size=12.4 K, encoding=NONE, compression=NONE, seqNum=237, earliestPutTs=1731329055247 2024-11-11T12:44:18,645 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=315, memsize=51.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/bffe84d15749455f9183e3edddb274f4 2024-11-11T12:44:18,650 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): 
Compacting e5311a9ad3004c1198e31469ae317807, keycount=150, bloomtype=ROW, size=11.9 K, encoding=NONE, compression=NONE, seqNum=254, earliestPutTs=1731329055611 2024-11-11T12:44:18,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742484_1660 (size=31937) 2024-11-11T12:44:18,653 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 98656308faa1455ab4ba6462d4eff025, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=278, earliestPutTs=1731329056766 2024-11-11T12:44:18,657 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting b4df65cdd6c644fa920249f66b3c2598, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057937 2024-11-11T12:44:18,697 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/346d0f0fbfc447779bd6cd28ad6d6fcd as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd 2024-11-11T12:44:18,706 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/bbd1a3751cea497389678b177d445204 is 50, key is test_row_0/B:col10/1731329058100/Put/seqid=0 2024-11-11T12:44:18,720 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 346d0f0fbfc447779bd6cd28ad6d6fcd(size=31.2 K), total size for store is 31.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:18,720 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:18,720 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=12, startTime=1731329058100; duration=0sec 2024-11-11T12:44:18,720 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:18,721 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:18,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742485_1661 (size=12301) 2024-11-11T12:44:18,735 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#552 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:18,736 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/83f09d85faa24702b096f12395c894e2 is 50, key is test_row_0/B:col10/1731329057947/Put/seqid=0 2024-11-11T12:44:18,751 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:18,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 150 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329118750, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742486_1662 (size=12983) 2024-11-11T12:44:18,762 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,762 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,763 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,763 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,764 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/83f09d85faa24702b096f12395c894e2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/83f09d85faa24702b096f12395c894e2 2024-11-11T12:44:18,772 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into 83f09d85faa24702b096f12395c894e2(size=12.7 K), total size for store is 12.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
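The pid=172 failures above come from a master-driven flush procedure racing with the region's own MemStoreFlusher: FlushRegionCallable refuses to start a second flush while one is in flight, reports the IOException back, and the master re-dispatches the remote procedure until the region becomes flushable again. From a client's point of view that whole dance sits behind a single admin call; the sketch below is a hedged illustration (table name from the log, everything else assumed), not the code that triggered this particular procedure.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AdminFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Request a flush of the table; the master drives a flush procedure on the
      // region servers and retries it if a region reports it is already flushing.
      admin.flush(TableName.valueOf("TestAcidGuarantees"));
    }
  }
}
```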
2024-11-11T12:44:18,772 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:18,772 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=12, startTime=1731329058100; duration=0sec 2024-11-11T12:44:18,772 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:18,772 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:18,920 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:18,920 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:18,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:18,920 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:18,920 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
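The "Over memstore limit=512.0 K" figure is not a fixed constant: the blocking size is derived from the per-region flush size multiplied by a block multiplier, both of which are configurable. The sketch below shows how such a limit could be read from configuration; the 128 KB flush size and 4x multiplier are illustrative assumptions chosen only because they reproduce 512 K, since the test's real settings do not appear in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BlockingLimitExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Illustrative values only: a 128 KB flush size with a 4x multiplier
    // yields the 512 K blocking limit reported in the exceptions above.
    conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024);
    conf.setInt("hbase.hregion.memstore.block.multiplier", 4);

    long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
    int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
    long blockingLimit = flushSize * multiplier;   // writes are rejected past this point
    System.out.println("blocking memstore limit = " + blockingLimit + " bytes");
  }
}
```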
2024-11-11T12:44:18,921 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:18,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:19,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,076 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:19,076 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,076 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,077 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,111 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:19,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 155 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329119110, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,114 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:19,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 149 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329119112, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,117 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:19,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329119116, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,136 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/bbd1a3751cea497389678b177d445204 2024-11-11T12:44:19,154 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/4172af86cb6640f0b68d72af7d48ab1b is 50, key is test_row_0/C:col10/1731329058100/Put/seqid=0 2024-11-11T12:44:19,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742487_1663 (size=12301) 2024-11-11T12:44:19,232 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,232 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:19,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:19,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,236 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] 
at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,236 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,256 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:19,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 152 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329119255, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,388 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,392 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:19,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:19,392 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,392 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
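All of this write-side throttling happens while TestAcidGuarantees' readers keep scanning the table: the property under test is that every cell in a given row, across families A, B and C, reflects the same atomic write no matter how flushes and compactions interleave. A minimal sketch of that kind of row-consistency check follows; the table and family layout come from the log, while the scanner setup and failure handling are assumptions rather than the test's actual verifier.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class RowConsistencyCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"));
         ResultScanner scanner = table.getScanner(new Scan())) {
      for (Result row : scanner) {
        byte[] expected = null;
        for (Cell cell : row.rawCells()) {
          byte[] value = CellUtil.cloneValue(cell);
          if (expected == null) {
            expected = value;                    // first cell fixes the expected value for this row
          } else if (!Bytes.equals(expected, value)) {
            throw new AssertionError("row " + Bytes.toString(row.getRow())
                + " mixes cells from different atomic writes");
          }
        }
      }
    }
  }
}
```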
2024-11-11T12:44:19,393 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,546 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,546 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:19,546 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:19,546 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] handler.RSProcedureHandler(58): pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,547 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=172 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:19,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=172 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
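Once the flush the log has been waiting on finally completes (the "Finished flush of dataSize ~154.31 KB" entry below), the .tmp files are committed into the A, B and C store directories that appear throughout this section. As a hedged illustration, the committed HFiles could be listed straight off the test's HDFS namenode as sketched below; the URI and region path are copied from the log, and doing this outside the test harness is purely for inspection.

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Namenode and region directory exactly as they appear in this log.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:42421"), conf);
    Path regionDir = new Path("/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/"
        + "data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776");
    for (String family : new String[] {"A", "B", "C"}) {
      for (FileStatus file : fs.listStatus(new Path(regionDir, family))) {
        // Each entry is one committed HFile, e.g. bffe84d15749455f9183e3edddb274f4 (39.0 K) for A.
        System.out.println(family + ": " + file.getPath().getName() + " " + file.getLen() + " bytes");
      }
    }
  }
}
```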
2024-11-11T12:44:19,606 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=51.44 KB at sequenceid=315 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/4172af86cb6640f0b68d72af7d48ab1b 2024-11-11T12:44:19,614 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/bffe84d15749455f9183e3edddb274f4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4 2024-11-11T12:44:19,619 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4, entries=200, sequenceid=315, filesize=39.0 K 2024-11-11T12:44:19,620 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/bbd1a3751cea497389678b177d445204 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204 2024-11-11T12:44:19,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,629 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204, entries=150, sequenceid=315, filesize=12.0 K 2024-11-11T12:44:19,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,631 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/4172af86cb6640f0b68d72af7d48ab1b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b 2024-11-11T12:44:19,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,632 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b, entries=150, sequenceid=315, filesize=12.0 K 2024-11-11T12:44:19,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,635 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,639 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,640 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~154.31 KB/158010, heapSize ~405 KB/414720, currentSize=46.96 KB/48090 for cc6fe7bde4d6aa548700eb200610e776 in 1538ms, sequenceid=315, compaction requested=false 2024-11-11T12:44:19,640 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,648 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,654 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,666 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,669 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,675 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,684 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,685 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,686 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,699 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:19,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,699 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=172 2024-11-11T12:44:19,699 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:19,699 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=46.96 KB heapSize=123.80 KB 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:19,700 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:19,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,708 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411117897f6e5ad894468b3848deff297f8cb_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329058136/Put/seqid=0 2024-11-11T12:44:19,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:19,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742488_1664 (size=9914) 2024-11-11T12:44:19,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,737 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,753 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,770 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,774 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:19,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
[this DEBUG record from storefiletracker.StoreFileTrackerFactory(122) repeats several hundred times between 2024-11-11T12:44:19,776 and 2024-11-11T12:44:20,009, emitted by RpcServer.default.FPBQ.Fifo handlers 0, 1 and 2 (queue=0, port=44673); the only distinct record interleaved in this stretch is the master procedure-status check below]
2024-11-11T12:44:19,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171
2024-11-11T12:44:20,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,010 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,013 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,015 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,018 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,022 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,023 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,035 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,047 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,066 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,071 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,073 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,075 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,077 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,083 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,085 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,098 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,102 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,115 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,119 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,119 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,121 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:20,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:20,123 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411117897f6e5ad894468b3848deff297f8cb_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117897f6e5ad894468b3848deff297f8cb_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:20,124 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/296485ea6fc64f128cec6c3c4a101f64, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,125 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/296485ea6fc64f128cec6c3c4a101f64 is 175, key is test_row_0/A:col10/1731329058136/Put/seqid=0 2024-11-11T12:44:20,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,130 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742489_1665 (size=22561) 2024-11-11T12:44:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,134 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,135 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:20,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329120157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,160 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329120157, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,160 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,160 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329120158, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,261 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 154 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329120260, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 156 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329120261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329120261, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,263 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329120262, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,464 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,464 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 158 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329120463, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329120465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,467 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329120465, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,535 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=328, memsize=15.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/296485ea6fc64f128cec6c3c4a101f64 2024-11-11T12:44:20,558 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/6869dd0c187548ef9718bad43e096adb is 50, key is test_row_0/B:col10/1731329058136/Put/seqid=0 2024-11-11T12:44:20,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742490_1666 (size=9857) 2024-11-11T12:44:20,768 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,769 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 160 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329120768, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329120770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:20,771 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:20,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 164 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329120770, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,006 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/6869dd0c187548ef9718bad43e096adb 2024-11-11T12:44:21,018 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ad5dbfa3248346d6ad01a35ebc7c4599 is 50, key is test_row_0/C:col10/1731329058136/Put/seqid=0 2024-11-11T12:44:21,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742491_1667 (size=9857) 2024-11-11T12:44:21,036 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.65 KB at sequenceid=328 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ad5dbfa3248346d6ad01a35ebc7c4599 2024-11-11T12:44:21,043 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/296485ea6fc64f128cec6c3c4a101f64 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64 2024-11-11T12:44:21,047 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 
{event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64, entries=100, sequenceid=328, filesize=22.0 K 2024-11-11T12:44:21,048 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/6869dd0c187548ef9718bad43e096adb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb 2024-11-11T12:44:21,053 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb, entries=100, sequenceid=328, filesize=9.6 K 2024-11-11T12:44:21,054 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/ad5dbfa3248346d6ad01a35ebc7c4599 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599 2024-11-11T12:44:21,058 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599, entries=100, sequenceid=328, filesize=9.6 K 2024-11-11T12:44:21,059 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~46.96 KB/48090, heapSize ~123.75 KB/126720, currentSize=154.31 KB/158010 for cc6fe7bde4d6aa548700eb200610e776 in 1360ms, sequenceid=328, compaction requested=true 2024-11-11T12:44:21,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:21,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
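The repeated RegionTooBusyException warnings above ("Over memstore limit=512.0 K") come from HRegion.checkResources rejecting writes once the region's memstore exceeds its blocking threshold, which is the configured memstore flush size multiplied by the block multiplier; a 512 K limit suggests this test runs with a deliberately tiny flush size (the production default is 128 MB). A minimal client-side sketch of reading the two settings that determine the threshold, using the default values as assumptions rather than the values used by this test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreBlockingLimitSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Size at which a single region memstore is flushed to an HFile (default ~128 MB).
            long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
            // Multiplier beyond which further writes are rejected with RegionTooBusyException.
            int blockMultiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
            long blockingLimit = flushSize * blockMultiplier;
            System.out.println("Writes to a region block once its memstore exceeds " + blockingLimit + " bytes");
        }
    }

The standard HBase client typically treats RegionTooBusyException as retryable and backs off before resending, which is why the same writer connections keep reappearing in the log with new call IDs after each rejection.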
2024-11-11T12:44:21,059 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-11-11T12:44:21,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-11-11T12:44:21,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=172, resume processing ppid=171 2024-11-11T12:44:21,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 3.2420 sec 2024-11-11T12:44:21,063 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=171, table=TestAcidGuarantees in 3.2460 sec 2024-11-11T12:44:21,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=161.02 KB heapSize=422.63 KB 2024-11-11T12:44:21,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:21,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:21,149 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:21,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:21,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:21,150 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:21,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:21,162 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110fb941278e6644349fd3ebe3c135b95d_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:21,183 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,184 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 108 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329121183, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742492_1668 (size=12454) 2024-11-11T12:44:21,206 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:21,224 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110fb941278e6644349fd3ebe3c135b95d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110fb941278e6644349fd3ebe3c135b95d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:21,225 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a168c6db960463a818992166ad435a1, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:21,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a168c6db960463a818992166ad435a1 is 175, key is test_row_0/A:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:21,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742493_1669 (size=31255) 2024-11-11T12:44:21,277 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329121274, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,278 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,278 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 162 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329121277, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,286 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329121284, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,290 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,290 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 110 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329121288, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,492 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 112 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329121491, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,668 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=355, memsize=53.7 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a168c6db960463a818992166ad435a1 2024-11-11T12:44:21,686 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/eaf50be55b7740f0ba33b21945df0e9b is 50, key is test_row_0/B:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:21,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742494_1670 (size=12301) 2024-11-11T12:44:21,796 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:21,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 114 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329121795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:21,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=171 2024-11-11T12:44:21,926 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 171 completed 2024-11-11T12:44:21,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:21,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees 2024-11-11T12:44:21,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:21,930 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:21,930 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=173, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:21,930 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:22,030 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:22,084 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-11T12:44:22,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
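The flush requests in this stretch of the log (procedures 171 and 173) originate from the test client calling the admin flush API: the master stores a FlushTableProcedure, fans it out into one FlushRegionProcedure per region, and dispatches a FlushRegionCallable to the hosting region server, while the client polls "Checking to see if procedure is done". A minimal sketch of issuing such a flush, assuming only the table name shown in the log:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
                 Admin admin = conn.getAdmin()) {
                // Asks the master to flush every region of the table; in this log that request
                // becomes FlushTableProcedure pid=173 with FlushRegionProcedure pid=174 underneath.
                admin.flush(TableName.valueOf("TestAcidGuarantees"));
            }
        }
    }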
2024-11-11T12:44:22,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:22,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:22,085 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] handler.RSProcedureHandler(58): pid=174 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:22,085 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=174 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:22,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=174 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
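The pid=174 failure above is the expected outcome when a procedure-driven flush races with the MemStoreFlusher: HRegion refuses to start a second flush of the same region ("NOT flushing ... as already flushing"), the FlushRegionCallable surfaces that as an IOException, and the master records the remote procedure as failed (the procedure framework typically retries it later). A deliberately simplified, hypothetical illustration of that single-flush-at-a-time guard, not the HBase implementation:

    import java.util.concurrent.atomic.AtomicBoolean;

    // Hypothetical sketch: only one flush of a given region may run at a time;
    // a concurrent request fails the way pid=174 does above.
    public class SingleFlushGuardSketch {
        private final AtomicBoolean flushing = new AtomicBoolean(false);

        public void flushRegion() throws java.io.IOException {
            if (!flushing.compareAndSet(false, true)) {
                throw new java.io.IOException("Unable to complete flush: region is already flushing");
            }
            try {
                // ... snapshot the memstores, write .tmp files, commit them into the store dirs ...
            } finally {
                flushing.set(false);
            }
        }
    }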
2024-11-11T12:44:22,100 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/eaf50be55b7740f0ba33b21945df0e9b 2024-11-11T12:44:22,145 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1d41e71850df4f75bd8b6e05af02313b is 50, key is test_row_0/C:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:22,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742495_1671 (size=12301) 2024-11-11T12:44:22,168 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.67 KB at sequenceid=355 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1d41e71850df4f75bd8b6e05af02313b 2024-11-11T12:44:22,176 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/5a168c6db960463a818992166ad435a1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1 2024-11-11T12:44:22,185 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1, entries=150, sequenceid=355, filesize=30.5 K 2024-11-11T12:44:22,186 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/eaf50be55b7740f0ba33b21945df0e9b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b 2024-11-11T12:44:22,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b, entries=150, sequenceid=355, filesize=12.0 K 2024-11-11T12:44:22,195 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/1d41e71850df4f75bd8b6e05af02313b as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b 2024-11-11T12:44:22,199 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b, entries=150, sequenceid=355, filesize=12.0 K 2024-11-11T12:44:22,199 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~161.02 KB/164880, heapSize ~422.58 KB/432720, currentSize=40.25 KB/41220 for cc6fe7bde4d6aa548700eb200610e776 in 1050ms, sequenceid=355, compaction requested=true 2024-11-11T12:44:22,199 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:22,200 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:22,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:22,200 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:22,201 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 125702 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47442 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:22,202 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:22,202 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:22,202 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=122.8 K 2024-11-11T12:44:22,202 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/83f09d85faa24702b096f12395c894e2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=46.3 K 2024-11-11T12:44:22,202 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=12 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1] 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 83f09d85faa24702b096f12395c894e2, keycount=150, bloomtype=ROW, size=12.7 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057937 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 346d0f0fbfc447779bd6cd28ad6d6fcd, keycount=150, bloomtype=ROW, size=31.2 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057937 2024-11-11T12:44:22,202 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting bbd1a3751cea497389678b177d445204, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731329057988 2024-11-11T12:44:22,203 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 6869dd0c187548ef9718bad43e096adb, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329058123 2024-11-11T12:44:22,203 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting bffe84d15749455f9183e3edddb274f4, keycount=200, bloomtype=ROW, size=39.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731329057988 2024-11-11T12:44:22,203 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting eaf50be55b7740f0ba33b21945df0e9b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:22,203 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 296485ea6fc64f128cec6c3c4a101f64, keycount=100, bloomtype=ROW, size=22.0 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329058123 2024-11-11T12:44:22,204 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 5a168c6db960463a818992166ad435a1, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:22,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:22,237 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:22,238 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,238 DEBUG 
[RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=174 2024-11-11T12:44:22,238 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:22,239 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=40.25 KB heapSize=106.22 KB 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:22,239 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:22,246 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#561 average throughput is 3.28 MB/second, slept 0 time(s) and total slept time is 0 ms. 
1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:22,246 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/01339c1b305749d0af8e3ca9e65f2f1e is 50, key is test_row_0/B:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:22,255 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111101de622cf2864c7e8d67e8c9f98f0c85_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:22,257 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111101de622cf2864c7e8d67e8c9f98f0c85_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:22,257 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111101de622cf2864c7e8d67e8c9f98f0c85_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:22,267 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411114d2e40ccf50a44979c1bc0fc9b9eeff7_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329061165/Put/seqid=0 2024-11-11T12:44:22,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742497_1673 (size=4469) 2024-11-11T12:44:22,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:22,275 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#560 average throughput is 0.64 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:22,275 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/027f0c9dfe444c8da0fdcd71a2d5745c is 175, key is test_row_0/A:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:22,276 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:22,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742496_1672 (size=13119) 2024-11-11T12:44:22,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742498_1674 (size=12454) 2024-11-11T12:44:22,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742499_1675 (size=32073) 2024-11-11T12:44:22,291 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/01339c1b305749d0af8e3ca9e65f2f1e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/01339c1b305749d0af8e3ca9e65f2f1e 2024-11-11T12:44:22,295 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into 01339c1b305749d0af8e3ca9e65f2f1e(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:22,295 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:22,296 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=12, startTime=1731329062200; duration=0sec 2024-11-11T12:44:22,296 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:22,296 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:22,296 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-11T12:44:22,297 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 47408 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-11T12:44:22,297 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:22,297 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:22,298 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f1f63508abbe4682a1f9ff78b403ff42, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=46.3 K 2024-11-11T12:44:22,298 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting f1f63508abbe4682a1f9ff78b403ff42, keycount=150, bloomtype=ROW, size=12.6 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1731329057937 2024-11-11T12:44:22,299 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4172af86cb6640f0b68d72af7d48ab1b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=315, earliestPutTs=1731329057988 2024-11-11T12:44:22,299 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting ad5dbfa3248346d6ad01a35ebc7c4599, keycount=100, bloomtype=ROW, size=9.6 K, encoding=NONE, compression=NONE, seqNum=328, earliestPutTs=1731329058123 2024-11-11T12:44:22,299 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 1d41e71850df4f75bd8b6e05af02313b, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:22,308 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#563 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:22,308 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/07c3901ef3ff40399e11aa6622d7a950 is 50, key is test_row_0/C:col10/1731329060157/Put/seqid=0 2024-11-11T12:44:22,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742500_1676 (size=13085) 2024-11-11T12:44:22,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,312 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329122309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,312 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 118 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329122309, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,313 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,313 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 166 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329122310, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,314 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329122311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,314 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 179 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329122311, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,315 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/07c3901ef3ff40399e11aa6622d7a950 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07c3901ef3ff40399e11aa6622d7a950 2024-11-11T12:44:22,321 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into 07c3901ef3ff40399e11aa6622d7a950(size=12.8 K), total size for store is 12.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:22,321 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:22,321 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=12, startTime=1731329062200; duration=0sec 2024-11-11T12:44:22,321 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:22,321 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:22,415 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 120 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329122413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,415 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329122413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,416 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 168 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329122414, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,417 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,417 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329122415, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,419 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,419 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 181 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329122418, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,532 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:22,617 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 122 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329122616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,618 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329122616, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,619 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 170 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329122618, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,621 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329122619, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,622 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329122620, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,687 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:22,692 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411114d2e40ccf50a44979c1bc0fc9b9eeff7_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d2e40ccf50a44979c1bc0fc9b9eeff7_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:22,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/525c7199eb3745f18d5800aec0aab46f, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:22,693 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/525c7199eb3745f18d5800aec0aab46f is 175, key is test_row_0/A:col10/1731329061165/Put/seqid=0 2024-11-11T12:44:22,694 DEBUG 
[RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/027f0c9dfe444c8da0fdcd71a2d5745c as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c 2024-11-11T12:44:22,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742501_1677 (size=31255) 2024-11-11T12:44:22,699 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=364, memsize=13.4 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/525c7199eb3745f18d5800aec0aab46f 2024-11-11T12:44:22,699 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 4 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 027f0c9dfe444c8da0fdcd71a2d5745c(size=31.3 K), total size for store is 31.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:22,699 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:22,699 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=12, startTime=1731329062200; duration=0sec 2024-11-11T12:44:22,699 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:22,699 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:22,706 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/3e51d3ce07ec40d683a0d5a2769c86ab is 50, key is test_row_0/B:col10/1731329061165/Put/seqid=0 2024-11-11T12:44:22,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742502_1678 (size=12301) 2024-11-11T12:44:22,921 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 172 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329122921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,921 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 174 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329122921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,922 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 124 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329122921, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,923 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329122922, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:22,925 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:22,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329122924, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:23,111 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/3e51d3ce07ec40d683a0d5a2769c86ab 2024-11-11T12:44:23,118 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/717b5d14587b4431875906299f8c9b3e is 50, key is test_row_0/C:col10/1731329061165/Put/seqid=0 2024-11-11T12:44:23,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742503_1679 (size=12301) 2024-11-11T12:44:23,130 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.42 KB at sequenceid=364 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/717b5d14587b4431875906299f8c9b3e 2024-11-11T12:44:23,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/525c7199eb3745f18d5800aec0aab46f as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f 2024-11-11T12:44:23,139 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f, entries=150, sequenceid=364, filesize=30.5 K 2024-11-11T12:44:23,140 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/3e51d3ce07ec40d683a0d5a2769c86ab as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab 2024-11-11T12:44:23,143 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab, entries=150, sequenceid=364, filesize=12.0 K 2024-11-11T12:44:23,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/717b5d14587b4431875906299f8c9b3e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e 2024-11-11T12:44:23,148 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e, entries=150, sequenceid=364, filesize=12.0 K 2024-11-11T12:44:23,149 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(3040): Finished flush of dataSize ~40.25 KB/41220, heapSize ~106.17 KB/108720, currentSize=167.72 KB/171750 for cc6fe7bde4d6aa548700eb200610e776 in 911ms, sequenceid=364, compaction requested=false 2024-11-11T12:44:23,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:23,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
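The RegionTooBusyException traces repeated through this window show HRegion.checkResources rejecting Mutate calls while the region's memstore sits above its blocking limit (512.0 K here; in HBase that limit is hbase.hregion.memstore.flush.size multiplied by hbase.hregion.memstore.block.multiplier, which suggests the test runs with a deliberately small flush size). The flush that just finished above is what drains the memstore and lets writes through again. The stock client already retries these failures internally (governed by hbase.client.retries.number and hbase.client.pause); the sketch below is only a hand-rolled illustration of that back-off, with the table, family, row, and retry parameters assumed rather than taken from the test.

// Hand-rolled back-off for the RegionTooBusyException seen above. Table, family,
// row and retry parameters are illustrative assumptions, not values from the test;
// the standard HBase client performs equivalent retries internally.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BusyRegionRetryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("TestAcidGuarantees"))) {
      Put put = new Put(Bytes.toBytes("test_row_0"))
          .addColumn(Bytes.toBytes("A"), Bytes.toBytes("col10"), Bytes.toBytes("value"));
      long backoffMs = 100;
      for (int attempt = 1; attempt <= 10; attempt++) {
        try {
          table.put(put);   // server throws RegionTooBusyException while over the limit
          return;           // write accepted once a flush has drained the memstore
        } catch (RegionTooBusyException busy) {
          Thread.sleep(backoffMs);              // give the flush time to complete
          backoffMs = Math.min(backoffMs * 2, 5_000);
        }
      }
      throw new IllegalStateException("region stayed busy after 10 attempts");
    }
  }
}

Raising hbase.hregion.memstore.flush.size or hbase.hregion.memstore.block.multiplier would raise the 512.0 K ceiling seen in these entries, at the cost of larger, less frequent flushes.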
2024-11-11T12:44:23,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=174}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=174 2024-11-11T12:44:23,149 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=174 2024-11-11T12:44:23,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-11-11T12:44:23,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 1.2200 sec 2024-11-11T12:44:23,153 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=173, table=TestAcidGuarantees in 1.2230 sec 2024-11-11T12:44:23,425 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:23,426 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=174.43 KB heapSize=457.78 KB 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:23,427 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:23,432 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329123430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,433 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110c73ec23bf5d45ba9d98fe4d3fa90d4d_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:23,433 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,433 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 128 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329123430, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,434 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 176 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329123431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329123431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,434 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 183 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329123431, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742504_1680 (size=12454) 2024-11-11T12:44:23,455 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,459 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110c73ec23bf5d45ba9d98fe4d3fa90d4d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110c73ec23bf5d45ba9d98fe4d3fa90d4d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:23,460 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/90f2e128396e45ec9a79ed0902d8a7fb, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:23,460 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/90f2e128396e45ec9a79ed0902d8a7fb is 175, key is test_row_0/A:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:23,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742505_1681 (size=31255) 2024-11-11T12:44:23,471 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=398, memsize=62.6 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/90f2e128396e45ec9a79ed0902d8a7fb 2024-11-11T12:44:23,482 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/4156fd8b97e0466fa4207e7031b42be0 is 50, key is 
test_row_0/B:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:23,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742506_1682 (size=12301) 2024-11-11T12:44:23,497 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/4156fd8b97e0466fa4207e7031b42be0 2024-11-11T12:44:23,522 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/acf67d7253b74ef8aa4fc57cada4ea08 is 50, key is test_row_0/C:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:23,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 178 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329123535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 130 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329123534, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,537 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 185 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329123535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 180 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329123535, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742507_1683 (size=12301) 2024-11-11T12:44:23,541 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=62.62 KB at sequenceid=398 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/acf67d7253b74ef8aa4fc57cada4ea08 2024-11-11T12:44:23,546 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/90f2e128396e45ec9a79ed0902d8a7fb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb 2024-11-11T12:44:23,550 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb, entries=150, sequenceid=398, filesize=30.5 K 2024-11-11T12:44:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,552 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/4156fd8b97e0466fa4207e7031b42be0 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0 2024-11-11T12:44:23,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,557 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0, entries=150, sequenceid=398, filesize=12.0 K 2024-11-11T12:44:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/acf67d7253b74ef8aa4fc57cada4ea08 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08 2024-11-11T12:44:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08, entries=150, sequenceid=398, filesize=12.0 K 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~187.85 KB/192360, heapSize ~492.89 KB/504720, currentSize=26.84 KB/27480 for cc6fe7bde4d6aa548700eb200610e776 in 137ms, sequenceid=398, compaction requested=true 2024-11-11T12:44:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:A, priority=-2147483648, current under compaction store size is 1 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:23,563 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:B, priority=-2147483648, current under compaction store size is 2 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store cc6fe7bde4d6aa548700eb200610e776:C, priority=-2147483648, current under compaction store size is 3 2024-11-11T12:44:23,563 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:23,563 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=2), splitQueue=0 2024-11-11T12:44:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37721 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:23,564 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/B is initiating minor compaction (all files) 2024-11-11T12:44:23,564 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/B in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,564 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/01339c1b305749d0af8e3ca9e65f2f1e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=36.8 K 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94583 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/A is initiating minor compaction (all files) 2024-11-11T12:44:23,565 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/A in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
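The two compaction threads above each select their files with the exploring policy: "considering 1 permutations with 1 in ratio" means the single candidate permutation it examined passed a size-ratio test in which a file stays eligible only if it is not disproportionately larger than the other files in the selection (hbase.hstore.compaction.ratio, 1.2 by default). The snippet below is a simplified illustration of that test, not the HBase implementation; the byte sizes are taken roughly from the three B-family files selected above.

// Simplified illustration of the size-ratio test behind the
// "considering 1 permutations with 1 in ratio" lines above; not the HBase code.
// 1.2 is the default hbase.hstore.compaction.ratio.
import java.util.List;

public class CompactionRatioSketch {
  static boolean filesInRatio(List<Long> fileSizes, double ratio) {
    long total = fileSizes.stream().mapToLong(Long::longValue).sum();
    for (long size : fileSizes) {
      // A file stays eligible only if it is not much larger than the rest combined.
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Roughly the three B-family files selected above (totalSize 37721 bytes).
    List<Long> bFiles = List.of(13_119L, 12_301L, 12_301L);
    System.out.println(filesInRatio(bFiles, 1.2));  // true -> the permutation is "in ratio"
  }
}

Here the one candidate permutation passed, so each store's three flush outputs are compacted together, as the subsequent entries show.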
2024-11-11T12:44:23,565 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=92.4 K 2024-11-11T12:44:23,565 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(181): MOB compaction: major=false isAll=true priority=13 throughput controller=DefaultCompactionThroughputController [maxThroughput=50.00 MB/second, activeCompactions=0] table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(191): MOB compaction table=TestAcidGuarantees cf=A region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
files: [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb] 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 01339c1b305749d0af8e3ca9e65f2f1e, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 3e51d3ce07ec40d683a0d5a2769c86ab, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731329061165 2024-11-11T12:44:23,565 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 027f0c9dfe444c8da0fdcd71a2d5745c, keycount=150, bloomtype=ROW, size=31.3 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,566 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 4156fd8b97e0466fa4207e7031b42be0, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1731329063424 2024-11-11T12:44:23,566 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 525c7199eb3745f18d5800aec0aab46f, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731329061165 2024-11-11T12:44:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,566 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] compactions.Compactor(224): Compacting 
90f2e128396e45ec9a79ed0902d8a7fb, keycount=150, bloomtype=ROW, size=30.5 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1731329063424 2024-11-11T12:44:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,569 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,570 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,573 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(322): Compact MOB=false optimized configured=false optimized enabled=false maximum MOB file size=1073741824 major=true store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:23,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,574 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#B#compaction#569 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 1 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:23,574 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,574 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/d90f5dbd9cd146da94c31da04b8f4cd2 is 50, key is test_row_0/B:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:23,575 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(626): New MOB writer created=d41d8cd98f00b204e9800998ecf8427e2024111141d0d8f5cd7149cd995572758faf2af3_cc6fe7bde4d6aa548700eb200610e776 store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:23,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,576 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,577 DEBUG 
[RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(647): Commit or abort size=0 mobCells=0 major=true file=d41d8cd98f00b204e9800998ecf8427e2024111141d0d8f5cd7149cd995572758faf2af3_cc6fe7bde4d6aa548700eb200610e776, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:23,577 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] mob.DefaultMobStoreCompactor(658): Aborting writer for hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111141d0d8f5cd7149cd995572758faf2af3_cc6fe7bde4d6aa548700eb200610e776 because there are no MOB cells, store=[table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,580 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,581 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,583 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,588 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,589 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,590 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,592 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,594 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,595 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,597 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,599 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,602 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,603 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,604 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,605 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
2024-11-11T12:44:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742508_1684 (size=13221) 2024-11-11T12:44:23,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 
{}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742509_1685 (size=4469) 2024-11-11T12:44:23,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 
[... the same DEBUG entry from storefiletracker.StoreFileTrackerFactory(122), "instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker", repeats continuously between 2024-11-11T12:44:23,613 and 2024-11-11T12:44:23,689, emitted by RpcServer.default.FPBQ.Fifo handlers 0-2 on port 44673 ...]
2024-11-11T12:44:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,689 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,695 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,697 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,698 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,708 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:23,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker [12:44:23,734-12:44:23,748: dozens of further identical DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0/1/2, port 44673, each reading storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
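The DEBUG line condensed above is emitted each time StoreFileTrackerFactory builds a tracker for a store; DefaultStoreFileTracker is the classic implementation that discovers HFiles by listing the store directory. As a rough illustration of where that choice comes from, the sketch below sets and reads the tracker implementation through configuration. It assumes the hbase.store.file-tracker.impl key used by StoreFileTrackerFactory and the DEFAULT/FILE value names; verify both against the HBase version in use.

    // Minimal sketch, not from the test: shows where the tracker choice logged above comes from.
    // Assumes the hbase.store.file-tracker.impl key read by StoreFileTrackerFactory.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TrackerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // DEFAULT is the tracker being instantiated throughout this log: it discovers HFiles
        // by listing the store directory. FILE records the file list in a tracker file instead,
        // which is intended to avoid renames on object stores.
        conf.set("hbase.store.file-tracker.impl", "DEFAULT");
        System.out.println("store file tracker = " + conf.get("hbase.store.file-tracker.impl"));
      }
    }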
[12:44:23,748-12:44:23,766: further repeated DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0/1/2, port 44673, each reading storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker, interleaved with the flush events below]
2024-11-11T12:44:23,750 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776
2024-11-11T12:44:23,751 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=53.67 KB heapSize=141.38 KB
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C
2024-11-11T12:44:23,751 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null
2024-11-11T12:44:23,757 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111141b156d734fb4df18d1e5b6b70abd6c1_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_1/A:col10/1731329063749/Put/seqid=0
2024-11-11T12:44:23,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742510_1686 (size=17534)
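The flush above is the MemStoreFlusher reacting to the region's memstore crossing its flush threshold; when writers outrun the flush, HRegion.checkResources rejects further mutations once the memstore reaches the flush size times the blocking multiplier, which is the 512.0 K "Over memstore limit" RegionTooBusyException that follows. A minimal sketch of that sizing arithmetic, assuming the standard hbase.hregion.memstore.flush.size and hbase.hregion.memstore.block.multiplier keys; the 128 KB flush size is only an inference from the 512 K limit in this log, not something the log states.

    // Minimal sketch of the memstore sizing that produces the 512 K blocking limit seen below.
    // Production defaults are a 128 MB flush size and a multiplier of 4.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.memstore.flush.size", 128 * 1024L); // flush threshold (inferred test value)
        conf.setInt("hbase.hregion.memstore.block.multiplier", 4);      // blocking factor

        long flushSize = conf.getLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        int multiplier = conf.getInt("hbase.hregion.memstore.block.multiplier", 4);
        // HRegion.checkResources rejects writes with RegionTooBusyException once the
        // region's memstore exceeds flushSize * multiplier (here 131072 * 4 = 524288 bytes = 512 K).
        System.out.println("blocking limit = " + (flushSize * multiplier) + " bytes");
      }
    }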
[12:44:23,766-12:44:23,786: further repeated DEBUG entries from RpcServer.default.FPBQ.Fifo handlers 0 and 2, port 44673, each reading storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker]
2024-11-11T12:44:23,798 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?]
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329123795, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,800 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 188 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329123797, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 139 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329123798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,801 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329123798, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,900 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 195 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329123899, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 141 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329123904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,905 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329123904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:23,906 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:23,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 190 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329123904, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,017 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#A#compaction#570 average throughput is 0.06 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:24,018 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2996050abdaa452da5e814501a73b0f6 is 175, key is test_row_0/A:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:24,020 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/d90f5dbd9cd146da94c31da04b8f4cd2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/d90f5dbd9cd146da94c31da04b8f4cd2 2024-11-11T12:44:24,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-11-11T12:44:24,034 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 173 completed 2024-11-11T12:44:24,034 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/B of cc6fe7bde4d6aa548700eb200610e776 into d90f5dbd9cd146da94c31da04b8f4cd2(size=12.9 K), total size for store is 12.9 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:24,034 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:24,035 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/B, priority=13, startTime=1731329063563; duration=0sec 2024-11-11T12:44:24,035 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=1), splitQueue=0 2024-11-11T12:44:24,035 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:B 2024-11-11T12:44:24,035 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-11T12:44:24,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$22(4386): Client=jenkins//172.17.0.3 flush TestAcidGuarantees 2024-11-11T12:44:24,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees 2024-11-11T12:44:24,036 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_PREPARE 2024-11-11T12:44:24,037 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=175, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-11T12:44:24,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:24,037 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=176, ppid=175, state=RUNNABLE; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-11T12:44:24,048 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37687 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-11T12:44:24,048 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1540): cc6fe7bde4d6aa548700eb200610e776/C is initiating minor compaction (all files) 2024-11-11T12:44:24,048 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2351): Starting compaction of cc6fe7bde4d6aa548700eb200610e776/C in TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:24,048 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07c3901ef3ff40399e11aa6622d7a950, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08] into tmpdir=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp, totalSize=36.8 K 2024-11-11T12:44:24,049 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 07c3901ef3ff40399e11aa6622d7a950, keycount=150, bloomtype=ROW, size=12.8 K, encoding=NONE, compression=NONE, seqNum=355, earliestPutTs=1731329060155 2024-11-11T12:44:24,049 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting 717b5d14587b4431875906299f8c9b3e, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=364, earliestPutTs=1731329061165 2024-11-11T12:44:24,050 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] compactions.Compactor(224): Compacting acf67d7253b74ef8aa4fc57cada4ea08, keycount=150, bloomtype=ROW, size=12.0 K, encoding=NONE, compression=NONE, seqNum=398, earliestPutTs=1731329063424 2024-11-11T12:44:24,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742511_1687 (size=32175) 2024-11-11T12:44:24,063 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): cc6fe7bde4d6aa548700eb200610e776#C#compaction#572 average throughput is 6.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-11T12:44:24,064 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3eb32659387641ce900ab2a5669f7ee2 is 50, key is test_row_0/C:col10/1731329063425/Put/seqid=0 2024-11-11T12:44:24,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742512_1688 (size=13187) 2024-11-11T12:44:24,104 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,104 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 197 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329124102, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,108 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 191 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329124107, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,109 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 143 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329124108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,110 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 192 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329124108, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:24,164 DEBUG [MemStoreFlusher.0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:24,173 INFO [MemStoreFlusher.0 {}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111141b156d734fb4df18d1e5b6b70abd6c1_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111141b156d734fb4df18d1e5b6b70abd6c1_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:24,174 DEBUG [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1833bd1ec506459081ea8e5ba39962ff, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:24,174 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1833bd1ec506459081ea8e5ba39962ff is 175, key is test_row_1/A:col10/1731329063749/Put/seqid=0 2024-11-11T12:44:24,189 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,189 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-11T12:44:24,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742513_1689 (size=48635) 2024-11-11T12:44:24,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:24,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:24,189 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:24,190 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,190 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:24,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:24,191 INFO [MemStoreFlusher.0 {}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=409, memsize=17.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1833bd1ec506459081ea8e5ba39962ff 2024-11-11T12:44:24,201 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e153e60399b649ab92ccb5d4ce39c5c1 is 50, key is test_row_1/B:col10/1731329063749/Put/seqid=0 2024-11-11T12:44:24,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742514_1690 (size=9857) 2024-11-11T12:44:24,213 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e153e60399b649ab92ccb5d4ce39c5c1 2024-11-11T12:44:24,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/9369ea362c3444bab0340cc5078d9763 is 50, key is test_row_1/C:col10/1731329063749/Put/seqid=0 2024-11-11T12:44:24,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742515_1691 (size=9857) 2024-11-11T12:44:24,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:24,343 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,343 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-11T12:44:24,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:24,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:24,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:24,344 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,344 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,408 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,408 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 199 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49476 deadline: 1731329124407, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,413 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 145 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49484 deadline: 1731329124411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,414 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] 
at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 193 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49524 deadline: 1731329124411, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,414 WARN [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 194 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49488 deadline: 1731329124413, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,445 WARN [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] regionserver.HRegion(5069): Region is too busy due to exceeding memstore size limit. 
org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5067) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$9(HRegion.java:3229) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3222) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3072) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:3035) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-11-11T12:44:24,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=44673 {}] ipc.CallRunner(138): callId: 189 service: ClientService methodName: Mutate size: 4.7 K connection: 172.17.0.3:49436 deadline: 1731329124445, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=512.0 K, regionName=cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,464 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/2996050abdaa452da5e814501a73b0f6 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2996050abdaa452da5e814501a73b0f6 2024-11-11T12:44:24,468 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/A of cc6fe7bde4d6aa548700eb200610e776 into 2996050abdaa452da5e814501a73b0f6(size=31.4 K), total size for store is 31.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-11T12:44:24,468 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:24,468 INFO [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/A, priority=13, startTime=1731329063563; duration=0sec 2024-11-11T12:44:24,468 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:24,468 DEBUG [RS:0;32e78532c8b1:44673-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:A 2024-11-11T12:44:24,483 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/3eb32659387641ce900ab2a5669f7ee2 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3eb32659387641ce900ab2a5669f7ee2 2024-11-11T12:44:24,494 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HStore(1336): Completed compaction of 3 (all) file(s) in cc6fe7bde4d6aa548700eb200610e776/C of cc6fe7bde4d6aa548700eb200610e776 into 3eb32659387641ce900ab2a5669f7ee2(size=12.9 K), total size for store is 12.9 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-11T12:44:24,494 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.HRegion(2381): Compaction status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:24,494 INFO [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776., storeName=cc6fe7bde4d6aa548700eb200610e776/C, priority=13, startTime=1731329063563; duration=0sec 2024-11-11T12:44:24,494 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-11T12:44:24,494 DEBUG [RS:0;32e78532c8b1:44673-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: cc6fe7bde4d6aa548700eb200610e776:C 2024-11-11T12:44:24,496 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,496 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-11T12:44:24,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:24,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
as already flushing 2024-11-11T12:44:24,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:24,497 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,497 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-0 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] 
at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:24,643 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=409 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/9369ea362c3444bab0340cc5078d9763 2024-11-11T12:44:24,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/1833bd1ec506459081ea8e5ba39962ff as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1833bd1ec506459081ea8e5ba39962ff 2024-11-11T12:44:24,649 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,650 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-11T12:44:24,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:24,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:24,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:24,650 ERROR [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] handler.RSProcedureHandler(58): pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-11T12:44:24,650 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-1 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(58): Failed to complete execution of pid=176 java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:24,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4114): Remote procedure failed, pid=176 org.apache.hadoop.hbase.procedure2.RemoteProcedureException: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.procedure2.RemoteProcedureException.fromProto(RemoteProcedureException.java:123) ~[hbase-procedure-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.MasterRpcServices.lambda$reportProcedureDone$4(MasterRpcServices.java:2571) ~[classes/:?] at java.util.ArrayList.forEach(ArrayList.java:1511) ~[?:?] at java.util.Collections$UnmodifiableCollection.forEach(Collections.java:1092) ~[?:?] at org.apache.hadoop.hbase.master.MasterRpcServices.reportProcedureDone(MasterRpcServices.java:2566) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16726) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] Caused by: java.io.IOException: Unable to complete flush {ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''} at org.apache.hadoop.hbase.regionserver.FlushRegionCallable.doCall(FlushRegionCallable.java:61) ~[classes/:?] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:35) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.procedure2.BaseRSProcedureCallable.call(BaseRSProcedureCallable.java:23) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.handler.RSProcedureHandler.process(RSProcedureHandler.java:56) ~[classes/:?] at org.apache.hadoop.hbase.executor.EventHandler.run(EventHandler.java:104) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-11T12:44:24,658 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1833bd1ec506459081ea8e5ba39962ff, entries=250, sequenceid=409, filesize=47.5 K 2024-11-11T12:44:24,659 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/e153e60399b649ab92ccb5d4ce39c5c1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e153e60399b649ab92ccb5d4ce39c5c1 2024-11-11T12:44:24,663 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e153e60399b649ab92ccb5d4ce39c5c1, entries=100, sequenceid=409, filesize=9.6 K 2024-11-11T12:44:24,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/9369ea362c3444bab0340cc5078d9763 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/9369ea362c3444bab0340cc5078d9763 2024-11-11T12:44:24,669 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/9369ea362c3444bab0340cc5078d9763, entries=100, sequenceid=409, filesize=9.6 K 2024-11-11T12:44:24,669 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3040): Finished flush of dataSize ~53.67 KB/54960, heapSize ~141.33 KB/144720, currentSize=147.60 KB/151140 for cc6fe7bde4d6aa548700eb200610e776 in 919ms, sequenceid=409, compaction requested=false 2024-11-11T12:44:24,670 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:24,803 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:24,803 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44673 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=176 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(51): Starting region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
2024-11-11T12:44:24,804 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=147.60 KB heapSize=387.47 KB 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:24,804 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:24,814 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110a2dbe92cb664e72b9685926a495a072_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329063796/Put/seqid=0 2024-11-11T12:44:24,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742516_1692 (size=12454) 2024-11-11T12:44:24,901 DEBUG [Thread-2556 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3d7fe93b to 127.0.0.1:54294 2024-11-11T12:44:24,901 DEBUG [Thread-2562 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6e8cd1ae to 127.0.0.1:54294 2024-11-11T12:44:24,901 DEBUG [Thread-2562 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,901 DEBUG [Thread-2556 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,902 DEBUG [Thread-2558 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11c440f7 to 127.0.0.1:54294 2024-11-11T12:44:24,902 DEBUG [Thread-2558 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,903 DEBUG [Thread-2564 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x4d832d43 to 127.0.0.1:54294 2024-11-11T12:44:24,903 DEBUG [Thread-2564 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,904 DEBUG [Thread-2560 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x58460ef3 to 127.0.0.1:54294 2024-11-11T12:44:24,904 DEBUG [Thread-2560 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=44673 {}] regionserver.HRegion(8581): Flush requested on cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:24,910 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2496): NOT flushing TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. as already flushing 2024-11-11T12:44:24,910 DEBUG [Thread-2545 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0644b7e6 to 127.0.0.1:54294 2024-11-11T12:44:24,910 DEBUG [Thread-2545 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,918 DEBUG [Thread-2551 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2d7fe431 to 127.0.0.1:54294 2024-11-11T12:44:24,918 DEBUG [Thread-2551 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,918 DEBUG [Thread-2553 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x091d72db to 127.0.0.1:54294 2024-11-11T12:44:24,918 DEBUG [Thread-2553 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:24,921 DEBUG [Thread-2549 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11a52cdf to 127.0.0.1:54294 2024-11-11T12:44:24,921 DEBUG [Thread-2549 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:25,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:25,228 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:25,232 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411110a2dbe92cb664e72b9685926a495a072_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110a2dbe92cb664e72b9685926a495a072_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:25,232 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/faaa3896e7ef4c2fa53c411a8a179feb, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:25,233 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/faaa3896e7ef4c2fa53c411a8a179feb is 175, key is test_row_0/A:col10/1731329063796/Put/seqid=0 2024-11-11T12:44:25,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742517_1693 (size=31255) 2024-11-11T12:44:25,637 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=437, memsize=49.2 K, hasBloomFilter=true, into tmp file 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/faaa3896e7ef4c2fa53c411a8a179feb 2024-11-11T12:44:25,644 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91da32c841a342fbaca98d1bbbe1ab89 is 50, key is test_row_0/B:col10/1731329063796/Put/seqid=0 2024-11-11T12:44:25,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742518_1694 (size=12301) 2024-11-11T12:44:26,048 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91da32c841a342fbaca98d1bbbe1ab89 2024-11-11T12:44:26,055 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/07359805b57b4b73a706e316995a42f1 is 50, key is test_row_0/C:col10/1731329063796/Put/seqid=0 2024-11-11T12:44:26,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742519_1695 (size=12301) 2024-11-11T12:44:26,062 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=49.20 KB at sequenceid=437 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/07359805b57b4b73a706e316995a42f1 2024-11-11T12:44:26,065 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/faaa3896e7ef4c2fa53c411a8a179feb as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/faaa3896e7ef4c2fa53c411a8a179feb 2024-11-11T12:44:26,069 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/faaa3896e7ef4c2fa53c411a8a179feb, entries=150, sequenceid=437, filesize=30.5 K 2024-11-11T12:44:26,069 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/91da32c841a342fbaca98d1bbbe1ab89 as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91da32c841a342fbaca98d1bbbe1ab89 2024-11-11T12:44:26,072 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91da32c841a342fbaca98d1bbbe1ab89, entries=150, sequenceid=437, filesize=12.0 K 2024-11-11T12:44:26,073 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/07359805b57b4b73a706e316995a42f1 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07359805b57b4b73a706e316995a42f1 2024-11-11T12:44:26,077 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07359805b57b4b73a706e316995a42f1, entries=150, sequenceid=437, filesize=12.0 K 2024-11-11T12:44:26,078 INFO [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(3040): Finished flush of dataSize ~147.60 KB/151140, heapSize ~387.42 KB/396720, currentSize=26.84 KB/27480 for cc6fe7bde4d6aa548700eb200610e776 in 1274ms, sequenceid=437, compaction requested=true 2024-11-11T12:44:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.HRegion(2538): Flush status journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.FlushRegionCallable(64): Closing region operation on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
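Column family A in the flush above goes through the MOB path: DefaultMobStoreFlusher writes the cell file under mobdir/.tmp and HMobStore renames it into mobdir/data, while families B and C use the default store flusher. For reference, a minimal sketch of how a MOB-enabled family like A could be declared through the public HBase client API; the 100 KB threshold, and creating the table this way at all, are assumptions for illustration rather than this test's actual setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobFamilySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Family "A" stores large cells as MOB files under mobdir/, as in the
      // HMobStore rename above; the 100 KB threshold is an assumed example value.
      ColumnFamilyDescriptor cfA = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("A"))
          .setMobEnabled(true)
          .setMobThreshold(100 * 1024L)
          .build();
      admin.createTable(TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestAcidGuarantees"))
          .setColumnFamily(cfA)
          .build());
    }
  }
}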
2024-11-11T12:44:26,078 DEBUG [RS_FLUSH_OPERATIONS-regionserver/32e78532c8b1:0-2 {event_type=RS_FLUSH_REGIONS, pid=176}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=176 2024-11-11T12:44:26,078 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster(4106): Remote procedure done, pid=176 2024-11-11T12:44:26,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=175 2024-11-11T12:44:26,080 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 2.0420 sec 2024-11-11T12:44:26,081 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=175, table=TestAcidGuarantees in 2.0450 sec 2024-11-11T12:44:26,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=175 2024-11-11T12:44:26,145 INFO [Thread-2555 {}] client.HBaseAdmin$TableFuture(3751): Operation: FLUSH, Table Name: default:TestAcidGuarantees, procId: 175 completed 2024-11-11T12:44:26,454 DEBUG [Thread-2547 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5c9b5141 to 127.0.0.1:54294 2024-11-11T12:44:26,454 DEBUG [Thread-2547 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(392): Finished test. Writers: 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 74 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 52 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 71 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(394): wrote 76 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(396): Readers: 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5331 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5091 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5046 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5246 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(398): read 5187 2024-11-11T12:44:26,454 INFO [Time-limited test {}] hbase.AcidGuaranteesTestTool(400): Scanners: 2024-11-11T12:44:26,454 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T12:44:26,455 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3fb684eb to 127.0.0.1:54294 2024-11-11T12:44:26,455 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:26,455 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of TestAcidGuarantees 2024-11-11T12:44:26,455 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.3 disable TestAcidGuarantees 2024-11-11T12:44:26,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored 
pid=177, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:26,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:26,458 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329066458"}]},"ts":"1731329066458"} 2024-11-11T12:44:26,459 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLING in hbase:meta 2024-11-11T12:44:26,462 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set TestAcidGuarantees to state=DISABLING 2024-11-11T12:44:26,462 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=TestAcidGuarantees}] 2024-11-11T12:44:26,463 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=179, ppid=178, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, UNASSIGN}] 2024-11-11T12:44:26,464 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=179, ppid=178, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, UNASSIGN 2024-11-11T12:44:26,464 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=179 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=CLOSING, regionLocation=32e78532c8b1,44673,1731328897232 2024-11-11T12:44:26,465 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-11T12:44:26,465 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=180, ppid=179, state=RUNNABLE; CloseRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232}] 2024-11-11T12:44:26,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:26,616 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:26,616 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(124): Close cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:26,616 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-11-11T12:44:26,616 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1681): Closing cc6fe7bde4d6aa548700eb200610e776, disabling compactions & flushes 2024-11-11T12:44:26,617 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1703): Closing region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 
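The procedure activity here is client-driven: the admin flush of TestAcidGuarantees (procId 175) has just completed, and the same client then submits a disable, which the master stores as DisableTableProcedure pid=177 and expands into the CloseTableRegions / TransitRegionState / CloseRegion subprocedures that close region cc6fe7bde4d6aa548700eb200610e776. A minimal sketch of that client-side sequence against the public Admin API, assuming a reachable cluster with default configuration; this is not the AcidGuaranteesTestTool's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushThenDisableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      // Triggers a table flush procedure on the master (pid=175 above) and
      // returns after its per-region flush subprocedures have reported done.
      admin.flush(table);
      // Submits a DisableTableProcedure (pid=177 above); the master then
      // schedules the region close subprocedures seen in the log.
      admin.disableTable(table);
    }
  }
}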
2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1724): Waiting without time limit for close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1791): Acquired close lock on TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. after waiting 0 ms 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1801): Updates disabled for region TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:26,617 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(2837): Flushing cc6fe7bde4d6aa548700eb200610e776 3/3 column families, dataSize=33.54 KB heapSize=88.64 KB 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=A 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=B 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactingMemStore(205): FLUSHING TO DISK cc6fe7bde4d6aa548700eb200610e776, store=C 2024-11-11T12:44:26,617 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.CompactionPipeline(136): Swapping pipeline suffix; before=1, new segment=null 2024-11-11T12:44:26,622 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111179dcc92a8693425bad11e0df51bab907_cc6fe7bde4d6aa548700eb200610e776 is 50, key is test_row_0/A:col10/1731329066453/Put/seqid=0 2024-11-11T12:44:26,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742520_1696 (size=12454) 2024-11-11T12:44:26,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:27,026 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-11T12:44:27,039 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e2024111179dcc92a8693425bad11e0df51bab907_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111179dcc92a8693425bad11e0df51bab907_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:27,040 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/e2a4be508faf45cc9879201d7125bbff, store: [table=TestAcidGuarantees family=A region=cc6fe7bde4d6aa548700eb200610e776] 2024-11-11T12:44:27,041 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/e2a4be508faf45cc9879201d7125bbff is 175, key is test_row_0/A:col10/1731329066453/Put/seqid=0 2024-11-11T12:44:27,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742521_1697 (size=31255) 2024-11-11T12:44:27,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:27,454 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=445, memsize=11.2 K, hasBloomFilter=true, into tmp file hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/e2a4be508faf45cc9879201d7125bbff 2024-11-11T12:44:27,461 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 is 50, key is test_row_0/B:col10/1731329066453/Put/seqid=0 2024-11-11T12:44:27,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742522_1698 (size=12301) 2024-11-11T12:44:27,560 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:27,870 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 2024-11-11T12:44:27,875 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cd01dae742744e789e91a40db4702ab9 is 50, key is test_row_0/C:col10/1731329066453/Put/seqid=0 2024-11-11T12:44:27,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742523_1699 (size=12301) 2024-11-11T12:44:28,282 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.18 KB at sequenceid=445 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cd01dae742744e789e91a40db4702ab9 2024-11-11T12:44:28,286 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/A/e2a4be508faf45cc9879201d7125bbff as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/e2a4be508faf45cc9879201d7125bbff 2024-11-11T12:44:28,290 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/e2a4be508faf45cc9879201d7125bbff, entries=150, sequenceid=445, filesize=30.5 K 2024-11-11T12:44:28,291 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 2024-11-11T12:44:28,294 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a2c024cb0d7445fcbebdf4e5ad1d6b64, entries=150, sequenceid=445, filesize=12.0 K 2024-11-11T12:44:28,295 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/.tmp/C/cd01dae742744e789e91a40db4702ab9 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cd01dae742744e789e91a40db4702ab9 2024-11-11T12:44:28,298 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HStore$StoreFlusherImpl(1989): Added 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cd01dae742744e789e91a40db4702ab9, entries=150, sequenceid=445, filesize=12.0 K 2024-11-11T12:44:28,299 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(3040): Finished flush of dataSize ~33.54 KB/34350, heapSize ~88.59 KB/90720, currentSize=0 B/0 for cc6fe7bde4d6aa548700eb200610e776 in 1682ms, sequenceid=445, compaction requested=true 2024-11-11T12:44:28,300 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb] to archive 2024-11-11T12:44:28,301 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-11T12:44:28,302 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2afbc23dfdaf45c8a7a1694e33e7a12a 2024-11-11T12:44:28,303 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/116adffcd85740b09de06167e9684c81 2024-11-11T12:44:28,304 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05015dfa8cc84b17a497700bb5e097c3 2024-11-11T12:44:28,305 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2f0f0a20645242b0bea9f8020b1326c8 2024-11-11T12:44:28,306 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/067c2114451943dcb08ba960c6662ac9 2024-11-11T12:44:28,307 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/05f7ca2c9119448986cb23fda57e632a 2024-11-11T12:44:28,308 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1f309171af27496bb2f2c70fa54c5ed1 2024-11-11T12:44:28,309 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/7f06247121464cd0b071fd8ab6817e21 2024-11-11T12:44:28,310 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/239cfe4378d7483fb4b24f785614df7a 2024-11-11T12:44:28,311 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/d0bd41310fa94f7684538ce4fd70f9d0 2024-11-11T12:44:28,311 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/61adcc8e003e4506977ce9862916223f 2024-11-11T12:44:28,312 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a1c52058ca045059b6e347791dbc0f8 2024-11-11T12:44:28,313 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/a7d1b6ede9d5482fb16d2e041bdbc789 2024-11-11T12:44:28,314 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/47dd6793c8d64f14a37fb426e2e8cbcb 2024-11-11T12:44:28,315 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/ba56e1ff3ab44267877d80d6c94af68f 2024-11-11T12:44:28,316 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2d5b986cf3444fd28f85f0818cbcd247 2024-11-11T12:44:28,316 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5e409630c1354b9ea81d8c256a920003 2024-11-11T12:44:28,317 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/70963d4f757047ec8f3e2e52b21408de 2024-11-11T12:44:28,318 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/35dff6efa28c4733a383d2246ae3e229 2024-11-11T12:44:28,319 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/aa835828f28c42089904c97744ed4207 2024-11-11T12:44:28,320 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/346d0f0fbfc447779bd6cd28ad6d6fcd 2024-11-11T12:44:28,320 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/bffe84d15749455f9183e3edddb274f4 2024-11-11T12:44:28,321 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/296485ea6fc64f128cec6c3c4a101f64 2024-11-11T12:44:28,322 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/027f0c9dfe444c8da0fdcd71a2d5745c 2024-11-11T12:44:28,323 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/5a168c6db960463a818992166ad435a1 2024-11-11T12:44:28,324 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/525c7199eb3745f18d5800aec0aab46f 2024-11-11T12:44:28,325 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/90f2e128396e45ec9a79ed0902d8a7fb 2024-11-11T12:44:28,327 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c48784e35302408a85c56dcd3a04370f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbcdac28ab014e41aa95516f035e22cb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a9a287fc98d344009c41de6b99aa9414, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/0ba040f816804f498c53801988e44f8c, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/786f726d3edc46b29dbf8103b68928d2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/83f09d85faa24702b096f12395c894e2, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/01339c1b305749d0af8e3ca9e65f2f1e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0] to archive 2024-11-11T12:44:28,328 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
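For orientation while reading these StoreCloser records: "archiving" a compacted store file is a move, not a copy. Each file is renamed from the region's data directory into the mirrored location under the cluster root's archive/ directory, preserving the data/default/<table>/<region>/<family>/<file> layout, which is why each source/destination pair above differs only by the archive/ path component. A minimal sketch of that path mapping using the plain Hadoop FileSystem API (illustrative only, not the actual backup.HFileArchiver implementation; rootDir stands for the hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-... base directory seen in these paths):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {
  // Map data/default/<table>/<region>/<family>/<file> under rootDir to the same
  // relative path under rootDir/archive, as in the "Archived from ... to ..." records.
  static Path archivePathFor(Path rootDir, Path storeFile) {
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1); // e.g. "data/default/TestAcidGuarantees/<region>/A/<hfile>"
    return new Path(new Path(rootDir, "archive"), relative);
  }

  // Archive one store file: ensure the target family directory exists, then rename.
  // On HDFS a rename is a metadata-only operation, so no data blocks are rewritten.
  static void archiveStoreFile(Configuration conf, Path rootDir, Path storeFile) throws IOException {
    FileSystem fs = rootDir.getFileSystem(conf);
    Path target = archivePathFor(rootDir, storeFile);
    fs.mkdirs(target.getParent());
    if (!fs.rename(storeFile, target)) {
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}

Each "Archived from FileableStoreFile, <data path> to <archive path>" record corresponds to one such rename for one compacted HFile; the per-family batches (A above, B here, C below) come from the store closer handing HFileArchiver the list of compacted files it logged in the preceding "Moving the files [...] to archive" record.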
2024-11-11T12:44:28,329 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91af8830708f4968bdc1cf582fda38d9 2024-11-11T12:44:28,330 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/8fb32e1030544d56a2ed0f47a6b53cb3 2024-11-11T12:44:28,331 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c48784e35302408a85c56dcd3a04370f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c48784e35302408a85c56dcd3a04370f 2024-11-11T12:44:28,332 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b97da4fb83f9490aac2e076d955cb1e1 2024-11-11T12:44:28,333 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/c664c198a5b348a09008b3bf5691be14 2024-11-11T12:44:28,334 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbcdac28ab014e41aa95516f035e22cb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbcdac28ab014e41aa95516f035e22cb 2024-11-11T12:44:28,335 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/cc48fa6a15be42b7bfc375807eb01ea3 2024-11-11T12:44:28,336 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/ae74c2c4d87444e9bce4955f2385667a 2024-11-11T12:44:28,336 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/793617b77b8944cd943fe68eefb2ff95 2024-11-11T12:44:28,337 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a9a287fc98d344009c41de6b99aa9414 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a9a287fc98d344009c41de6b99aa9414 2024-11-11T12:44:28,338 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e50c53862b034ba5b02ba76c90a152d6 2024-11-11T12:44:28,339 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/f04e418476364200bff91955c3ab27c8 2024-11-11T12:44:28,340 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/0ba040f816804f498c53801988e44f8c to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/0ba040f816804f498c53801988e44f8c 2024-11-11T12:44:28,341 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9a3d862214ab469ea102609ee95114df 2024-11-11T12:44:28,341 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/9d74ab3bf71e43889ee6db706a5c274a 2024-11-11T12:44:28,342 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/786f726d3edc46b29dbf8103b68928d2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/786f726d3edc46b29dbf8103b68928d2 2024-11-11T12:44:28,343 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/aa8b38c5e2694d90a788a430e6c67b7b 2024-11-11T12:44:28,344 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e5311a9ad3004c1198e31469ae317807 2024-11-11T12:44:28,345 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/98656308faa1455ab4ba6462d4eff025 2024-11-11T12:44:28,346 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/83f09d85faa24702b096f12395c894e2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/83f09d85faa24702b096f12395c894e2 2024-11-11T12:44:28,346 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/b4df65cdd6c644fa920249f66b3c2598 2024-11-11T12:44:28,347 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/bbd1a3751cea497389678b177d445204 2024-11-11T12:44:28,348 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/6869dd0c187548ef9718bad43e096adb 2024-11-11T12:44:28,349 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/01339c1b305749d0af8e3ca9e65f2f1e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/01339c1b305749d0af8e3ca9e65f2f1e 2024-11-11T12:44:28,350 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/eaf50be55b7740f0ba33b21945df0e9b 2024-11-11T12:44:28,351 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/3e51d3ce07ec40d683a0d5a2769c86ab 2024-11-11T12:44:28,352 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/4156fd8b97e0466fa4207e7031b42be0 2024-11-11T12:44:28,353 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] regionserver.HStore(2316): Moving the files [hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c83508e720cd4ddfaf9b3f9dda8b4862, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d805ec9d46a441089222539ed0e3493, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c72ebf370ced4b26836fddca49cd9752, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb99d435689a4d3c8deda76e2f119912, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f1f63508abbe4682a1f9ff78b403ff42, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07c3901ef3ff40399e11aa6622d7a950, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08] to archive 2024-11-11T12:44:28,354 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
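Once the C-family files below have been archived the same way, the remainder of this section shows the close finishing up: a recovered.edits/448.seqid marker is written, HRegion reports the region closed, the CloseRegionProcedure/TransitRegionStateProcedure/CloseTableRegionsProcedure chain (pids 180/179/178) completes, TestAcidGuarantees is marked DISABLED in hbase:meta (pid 177), and the client immediately follows up with a DELETE that triggers DeleteTableProcedure (pid 181) and a second round of archiving for the remaining region and MOB files. From the client side that teardown is just a disable followed by a delete; a minimal sketch with the standard HBase Admin API (an assumption about the test's cleanup, not its literal code; the Configuration is assumed to point at this mini-cluster):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();          // assumed to carry the mini-cluster's connection settings
    TableName table = TableName.valueOf("TestAcidGuarantees");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      if (admin.tableExists(table)) {
        admin.disableTable(table);  // corresponds to DisableTableProcedure (pid=177) and the region UNASSIGN above
        admin.deleteTable(table);   // corresponds to DeleteTableProcedure (pid=181) and the region/MOB archiving below
      }
    }
  }
}

Both calls, in their synchronous form, wait for the corresponding master procedure to finish, which is why the log shows "Operation: DISABLE ... procId: 177 completed" before the delete RPC arrives at the master.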
2024-11-11T12:44:28,355 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/d78fb23f0472454aa8fbe543cacf84b7 2024-11-11T12:44:28,356 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0240677b27e7418db96a08d74890680d 2024-11-11T12:44:28,357 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c83508e720cd4ddfaf9b3f9dda8b4862 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c83508e720cd4ddfaf9b3f9dda8b4862 2024-11-11T12:44:28,358 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1e1c3f9fe2364ffb85261439fd127e75 2024-11-11T12:44:28,358 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f7b94925f9bf4b2ca909cd673c1c42da 2024-11-11T12:44:28,359 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d805ec9d46a441089222539ed0e3493 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d805ec9d46a441089222539ed0e3493 2024-11-11T12:44:28,360 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ea929f0468544078bf24b3b83005501f 2024-11-11T12:44:28,361 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/a1e47bcdb0d94c00917bdba4ac0ec280 2024-11-11T12:44:28,362 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/0b446eca86564fa492cbbe91b2499025 2024-11-11T12:44:28,363 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c72ebf370ced4b26836fddca49cd9752 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/c72ebf370ced4b26836fddca49cd9752 2024-11-11T12:44:28,364 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/8f0ff88ef2dc45ff952621cb7a0be07e 2024-11-11T12:44:28,365 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb1eb291e80f4b5bab5518111ce7ac3f 2024-11-11T12:44:28,365 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/08dbd748b75a44e98028212f6fafdbf7 2024-11-11T12:44:28,366 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb99d435689a4d3c8deda76e2f119912 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cb99d435689a4d3c8deda76e2f119912 2024-11-11T12:44:28,367 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3d2f1dfab6d1473ea5b35bf54ab0ed1a 2024-11-11T12:44:28,368 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3e2d70789552433193b614f7209edf10 2024-11-11T12:44:28,369 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/2babbc458c114cdcb5dfd9014b1b137a 2024-11-11T12:44:28,370 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/354f200c097e45b1b073e7c5978880b9 2024-11-11T12:44:28,371 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f1f63508abbe4682a1f9ff78b403ff42 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/f1f63508abbe4682a1f9ff78b403ff42 2024-11-11T12:44:28,371 DEBUG 
[StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/95340fc84232420b84762c80c4b7a8c3 2024-11-11T12:44:28,373 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/4172af86cb6640f0b68d72af7d48ab1b 2024-11-11T12:44:28,373 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/ad5dbfa3248346d6ad01a35ebc7c4599 2024-11-11T12:44:28,374 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07c3901ef3ff40399e11aa6622d7a950 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07c3901ef3ff40399e11aa6622d7a950 2024-11-11T12:44:28,375 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/1d41e71850df4f75bd8b6e05af02313b 2024-11-11T12:44:28,376 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/717b5d14587b4431875906299f8c9b3e 2024-11-11T12:44:28,377 DEBUG [StoreCloser-TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/acf67d7253b74ef8aa4fc57cada4ea08 2024-11-11T12:44:28,381 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits/448.seqid, newMaxSeqId=448, maxSeqId=4 2024-11-11T12:44:28,382 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1922): Closed TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776. 2024-11-11T12:44:28,382 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] regionserver.HRegion(1635): Region close journal for cc6fe7bde4d6aa548700eb200610e776: 2024-11-11T12:44:28,383 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION, pid=180}] handler.UnassignRegionHandler(170): Closed cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,383 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=179 updating hbase:meta row=cc6fe7bde4d6aa548700eb200610e776, regionState=CLOSED 2024-11-11T12:44:28,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=180, resume processing ppid=179 2024-11-11T12:44:28,386 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, ppid=179, state=SUCCESS; CloseRegionProcedure cc6fe7bde4d6aa548700eb200610e776, server=32e78532c8b1,44673,1731328897232 in 1.9190 sec 2024-11-11T12:44:28,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=179, resume processing ppid=178 2024-11-11T12:44:28,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, ppid=178, state=SUCCESS; TransitRegionStateProcedure table=TestAcidGuarantees, region=cc6fe7bde4d6aa548700eb200610e776, UNASSIGN in 1.9230 sec 2024-11-11T12:44:28,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=177 2024-11-11T12:44:28,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=177, state=SUCCESS; CloseTableRegionsProcedure table=TestAcidGuarantees in 1.9260 sec 2024-11-11T12:44:28,390 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731329068390"}]},"ts":"1731329068390"} 2024-11-11T12:44:28,391 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=TestAcidGuarantees, state=DISABLED in hbase:meta 2024-11-11T12:44:28,394 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set TestAcidGuarantees to state=DISABLED 2024-11-11T12:44:28,395 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, state=SUCCESS; DisableTableProcedure table=TestAcidGuarantees in 1.9390 sec 2024-11-11T12:44:28,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=177 2024-11-11T12:44:28,561 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, 
Table Name: default:TestAcidGuarantees, procId: 177 completed 2024-11-11T12:44:28,562 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.3 delete TestAcidGuarantees 2024-11-11T12:44:28,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] procedure2.ProcedureExecutor(1098): Stored pid=181, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,563 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=181, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-11T12:44:28,564 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=181, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,566 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,568 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C, FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits] 2024-11-11T12:44:28,571 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1833bd1ec506459081ea8e5ba39962ff to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/1833bd1ec506459081ea8e5ba39962ff 2024-11-11T12:44:28,573 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2996050abdaa452da5e814501a73b0f6 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/2996050abdaa452da5e814501a73b0f6 2024-11-11T12:44:28,574 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/e2a4be508faf45cc9879201d7125bbff to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/e2a4be508faf45cc9879201d7125bbff 2024-11-11T12:44:28,575 
DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/faaa3896e7ef4c2fa53c411a8a179feb to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/A/faaa3896e7ef4c2fa53c411a8a179feb 2024-11-11T12:44:28,577 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91da32c841a342fbaca98d1bbbe1ab89 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/91da32c841a342fbaca98d1bbbe1ab89 2024-11-11T12:44:28,579 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/a2c024cb0d7445fcbebdf4e5ad1d6b64 2024-11-11T12:44:28,580 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/d90f5dbd9cd146da94c31da04b8f4cd2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/d90f5dbd9cd146da94c31da04b8f4cd2 2024-11-11T12:44:28,581 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e153e60399b649ab92ccb5d4ce39c5c1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/B/e153e60399b649ab92ccb5d4ce39c5c1 2024-11-11T12:44:28,583 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07359805b57b4b73a706e316995a42f1 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/07359805b57b4b73a706e316995a42f1 2024-11-11T12:44:28,584 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3eb32659387641ce900ab2a5669f7ee2 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/3eb32659387641ce900ab2a5669f7ee2 2024-11-11T12:44:28,585 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/9369ea362c3444bab0340cc5078d9763 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/9369ea362c3444bab0340cc5078d9763 2024-11-11T12:44:28,586 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cd01dae742744e789e91a40db4702ab9 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/C/cd01dae742744e789e91a40db4702ab9 2024-11-11T12:44:28,589 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits/448.seqid to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776/recovered.edits/448.seqid 2024-11-11T12:44:28,590 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/default/TestAcidGuarantees/cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,590 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived TestAcidGuarantees regions 2024-11-11T12:44:28,590 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:44:28,591 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A] 2024-11-11T12:44:28,594 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110a2dbe92cb664e72b9685926a495a072_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110a2dbe92cb664e72b9685926a495a072_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,595 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110c73ec23bf5d45ba9d98fe4d3fa90d4d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110c73ec23bf5d45ba9d98fe4d3fa90d4d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,596 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110fb941278e6644349fd3ebe3c135b95d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411110fb941278e6644349fd3ebe3c135b95d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,597 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111272aeb440bc44c6b9c44c512f50f5bcb_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111272aeb440bc44c6b9c44c512f50f5bcb_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,598 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111141b156d734fb4df18d1e5b6b70abd6c1_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111141b156d734fb4df18d1e5b6b70abd6c1_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,599 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111498c12bc982148ce8a5175a0b20ec530_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111498c12bc982148ce8a5175a0b20ec530_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,601 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d2e40ccf50a44979c1bc0fc9b9eeff7_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d2e40ccf50a44979c1bc0fc9b9eeff7_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,602 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d38fbc8afa240d093c0d52ded8606c7_cc6fe7bde4d6aa548700eb200610e776 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411114d38fbc8afa240d093c0d52ded8606c7_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,603 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111162e64ce74f2049a280f579698ebe97c9_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111162e64ce74f2049a280f579698ebe97c9_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,604 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117897f6e5ad894468b3848deff297f8cb_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411117897f6e5ad894468b3848deff297f8cb_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,606 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111179dcc92a8693425bad11e0df51bab907_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111179dcc92a8693425bad11e0df51bab907_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,607 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111186f5e4d5bf2c4c7692026fb4d70880d2_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111186f5e4d5bf2c4c7692026fb4d70880d2_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,609 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118c111e5583c7418f860c4ed9978e0347_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411118c111e5583c7418f860c4ed9978e0347_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,610 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111194fec8055e824b729935dfdfd9b443f6_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e2024111194fec8055e824b729935dfdfd9b443f6_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,611 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d1ccaddbe2c49039e2d12a056fea511_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e202411119d1ccaddbe2c49039e2d12a056fea511_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,612 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a1743446d6ae46a1abf9e9c5221af80d_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a1743446d6ae46a1abf9e9c5221af80d_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,614 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a53b3b31e5344f4dacabffa6d69adb9e_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111a53b3b31e5344f4dacabffa6d69adb9e_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,615 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ae4056843cc245f2a4243141be9599fd_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ae4056843cc245f2a4243141be9599fd_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,616 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b1502f82b3f4412f9e7d427c53e65636_cc6fe7bde4d6aa548700eb200610e776 to 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111b1502f82b3f4412f9e7d427c53e65636_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,617 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d19584a608d14c08840d1459c5ed5408_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111d19584a608d14c08840d1459c5ed5408_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,617 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111dfaa2562f29e46088671b5f9533fd962_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111dfaa2562f29e46088671b5f9533fd962_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,618 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e2a7f0d2196b4d1bb2212515e5578660_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111e2a7f0d2196b4d1bb2212515e5578660_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,619 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ebd8232a67d14529b8d28ec61b9e71b6_cc6fe7bde4d6aa548700eb200610e776 to hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/archive/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3/A/d41d8cd98f00b204e9800998ecf8427e20241111ebd8232a67d14529b8d28ec61b9e71b6_cc6fe7bde4d6aa548700eb200610e776 2024-11-11T12:44:28,620 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/mobdir/data/default/TestAcidGuarantees/6995762b1b921fda31b7b26a0c5785c3 2024-11-11T12:44:28,622 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=181, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,623 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of TestAcidGuarantees from hbase:meta 2024-11-11T12:44:28,625 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'TestAcidGuarantees' descriptor. 
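The HFileArchiver entries above reduce to a rename of each store file from the region's data directory into the mirrored path under archive/, followed by deletion of the emptied region directory. Below is a minimal sketch of that move pattern using the plain Hadoop FileSystem API; the NameNode address and directory paths are hypothetical stand-ins, and this is not the HFileArchiver implementation itself.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveMoveSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:8020"); // hypothetical NameNode address
    FileSystem fs = FileSystem.get(conf);

    // Hypothetical source/target roots mirroring the layout seen in the log.
    Path dataDir = new Path("/hbase/data/default/SomeTable/region/C");
    Path archiveDir = new Path("/hbase/archive/data/default/SomeTable/region/C");

    fs.mkdirs(archiveDir);
    for (FileStatus f : fs.listStatus(dataDir)) {
      // Move (rename) each store file into the archive, preserving its name.
      fs.rename(f.getPath(), new Path(archiveDir, f.getPath().getName()));
    }
    // Remove the now-empty source directory, like the "Deleted ..." entries above.
    fs.delete(dataDir, true);
    fs.close();
  }
}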
2024-11-11T12:44:28,626 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=181, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,626 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'TestAcidGuarantees' from region states. 2024-11-11T12:44:28,626 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1731329068626"}]},"ts":"9223372036854775807"} 2024-11-11T12:44:28,628 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-11-11T12:44:28,628 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => cc6fe7bde4d6aa548700eb200610e776, NAME => 'TestAcidGuarantees,,1731329042687.cc6fe7bde4d6aa548700eb200610e776.', STARTKEY => '', ENDKEY => ''}] 2024-11-11T12:44:28,628 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'TestAcidGuarantees' as deleted. 2024-11-11T12:44:28,628 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"TestAcidGuarantees","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1731329068628"}]},"ts":"9223372036854775807"} 2024-11-11T12:44:28,629 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table TestAcidGuarantees state from META 2024-11-11T12:44:28,640 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=181, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=TestAcidGuarantees 2024-11-11T12:44:28,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, state=SUCCESS; DeleteTableProcedure table=TestAcidGuarantees in 78 msec 2024-11-11T12:44:28,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40877 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=181 2024-11-11T12:44:28,665 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 181 completed 2024-11-11T12:44:28,676 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: TestAcidGuaranteesWithBasicPolicy#testMobGetAtomicity Thread=239 (was 238) - Thread LEAK? -, OpenFileDescriptor=461 (was 453) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=829 (was 848), ProcessCount=9 (was 9), AvailableMemoryMB=2109 (was 2430) 2024-11-11T12:44:28,676 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-11-11T12:44:28,676 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-11-11T12:44:28,676 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x76523d14 to 127.0.0.1:54294 2024-11-11T12:44:28,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:28,676 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-11T12:44:28,676 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1390920325, stopped=false 2024-11-11T12:44:28,677 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=32e78532c8b1,40877,1731328896051 2024-11-11T12:44:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T12:44:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-11T12:44:28,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:44:28,679 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-11-11T12:44:28,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:44:28,679 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T12:44:28,680 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-11T12:44:28,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:28,680 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '32e78532c8b1,44673,1731328897232' ***** 2024-11-11T12:44:28,680 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-11-11T12:44:28,680 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-11T12:44:28,680 INFO [RS:0;32e78532c8b1:44673 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-11T12:44:28,680 INFO [RS:0;32e78532c8b1:44673 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
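The "Operation: DELETE, Table Name: default:TestAcidGuarantees, procId: 181 completed" entry above is the client-side view of the DeleteTableProcedure traced in this log. A minimal client sketch that drives the same disable-then-delete path is shown here, assuming an hbase-site.xml reachable on the classpath; it is illustrative, not the test's own code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestAcidGuarantees");
      if (admin.tableExists(table)) {
        if (admin.isTableEnabled(table)) {
          admin.disableTable(table); // a table must be disabled before it can be deleted
        }
        admin.deleteTable(table);    // submits the delete procedure and waits for completion
      }
    }
  }
}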
2024-11-11T12:44:28,681 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(3579): Received CLOSE for 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1224): stopping server 32e78532c8b1,44673,1731328897232 2024-11-11T12:44:28,681 DEBUG [RS:0;32e78532c8b1:44673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-11-11T12:44:28,681 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 69716d04bd60881dfce8676dd10b689d, disabling compactions & flushes 2024-11-11T12:44:28,681 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:44:28,681 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:44:28,681 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. after waiting 0 ms 2024-11-11T12:44:28,681 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 
2024-11-11T12:44:28,681 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-11-11T12:44:28,681 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 69716d04bd60881dfce8676dd10b689d 1/1 column families, dataSize=78 B heapSize=488 B 2024-11-11T12:44:28,681 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1603): Online Regions={69716d04bd60881dfce8676dd10b689d=hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d., 1588230740=hbase:meta,,1.1588230740} 2024-11-11T12:44:28,682 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-11-11T12:44:28,682 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-11-11T12:44:28,682 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-11-11T12:44:28,682 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-11T12:44:28,682 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-11-11T12:44:28,682 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=20.55 KB heapSize=35.87 KB 2024-11-11T12:44:28,682 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:44:28,704 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/.tmp/info/9b031d8065224b20b5d6351727f50bc4 is 45, key is default/info:d/1731328901690/Put/seqid=0 2024-11-11T12:44:28,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742524_1700 (size=5037) 2024-11-11T12:44:28,710 INFO [regionserver/32e78532c8b1:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T12:44:28,710 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/info/de3927f153be4c67903ab641dc29e71d is 143, key is hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d./info:regioninfo/1731328901568/Put/seqid=0 2024-11-11T12:44:28,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742525_1701 (size=7725) 2024-11-11T12:44:28,713 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.89 KB at sequenceid=93 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/info/de3927f153be4c67903ab641dc29e71d 2024-11-11T12:44:28,736 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/rep_barrier/af9a8821e4bd4a2d93dc4caa12f3ad7d is 102, key is TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72./rep_barrier:/1731328928461/DeleteFamily/seqid=0 2024-11-11T12:44:28,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742526_1702 (size=6025) 2024-11-11T12:44:28,882 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:44:29,083 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, 69716d04bd60881dfce8676dd10b689d 2024-11-11T12:44:29,108 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/.tmp/info/9b031d8065224b20b5d6351727f50bc4 2024-11-11T12:44:29,112 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/.tmp/info/9b031d8065224b20b5d6351727f50bc4 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/info/9b031d8065224b20b5d6351727f50bc4 2024-11-11T12:44:29,115 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/info/9b031d8065224b20b5d6351727f50bc4, entries=2, sequenceid=6, filesize=4.9 K 2024-11-11T12:44:29,116 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for 69716d04bd60881dfce8676dd10b689d in 434ms, sequenceid=6, compaction requested=false 2024-11-11T12:44:29,119 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/namespace/69716d04bd60881dfce8676dd10b689d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-11T12:44:29,119 INFO [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 
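The hbase:namespace flush above follows the usual path: memstore contents are written to a .tmp HFile, committed into the info/ store directory, and the completed flush is recorded before the region closes. For reference, the same flush path can be requested from a client through the public Admin API; a short sketch, assuming a reachable cluster configuration as in the previous example:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask the region servers to flush every memstore of the table to HFiles.
      admin.flush(TableName.valueOf("hbase:namespace"));
    }
  }
}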
2024-11-11T12:44:29,119 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 69716d04bd60881dfce8676dd10b689d: 2024-11-11T12:44:29,119 DEBUG [RS_CLOSE_REGION-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1731328900195.69716d04bd60881dfce8676dd10b689d. 2024-11-11T12:44:29,140 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=588 B at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/rep_barrier/af9a8821e4bd4a2d93dc4caa12f3ad7d 2024-11-11T12:44:29,161 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/table/e817a47a443645249dcb9972d70d6c82 is 96, key is TestAcidGuarantees,,1731328901929.0a6a9f82df0ac9ece8343137343e2f72./table:/1731328928461/DeleteFamily/seqid=0 2024-11-11T12:44:29,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742527_1703 (size=5942) 2024-11-11T12:44:29,174 INFO [regionserver/32e78532c8b1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-11T12:44:29,175 INFO [regionserver/32e78532c8b1:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-11T12:44:29,283 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-11T12:44:29,483 DEBUG [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1629): Waiting on 1588230740 2024-11-11T12:44:29,566 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.08 KB at sequenceid=93 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/table/e817a47a443645249dcb9972d70d6c82 2024-11-11T12:44:29,570 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/info/de3927f153be4c67903ab641dc29e71d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/info/de3927f153be4c67903ab641dc29e71d 2024-11-11T12:44:29,573 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/info/de3927f153be4c67903ab641dc29e71d, entries=22, sequenceid=93, filesize=7.5 K 2024-11-11T12:44:29,574 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/rep_barrier/af9a8821e4bd4a2d93dc4caa12f3ad7d as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/rep_barrier/af9a8821e4bd4a2d93dc4caa12f3ad7d 
2024-11-11T12:44:29,577 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/rep_barrier/af9a8821e4bd4a2d93dc4caa12f3ad7d, entries=6, sequenceid=93, filesize=5.9 K 2024-11-11T12:44:29,577 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/.tmp/table/e817a47a443645249dcb9972d70d6c82 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/table/e817a47a443645249dcb9972d70d6c82 2024-11-11T12:44:29,580 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/table/e817a47a443645249dcb9972d70d6c82, entries=9, sequenceid=93, filesize=5.8 K 2024-11-11T12:44:29,581 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~20.55 KB/21040, heapSize ~35.82 KB/36680, currentSize=0 B/0 for 1588230740 in 899ms, sequenceid=93, compaction requested=false 2024-11-11T12:44:29,584 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/data/hbase/meta/1588230740/recovered.edits/96.seqid, newMaxSeqId=96, maxSeqId=1 2024-11-11T12:44:29,585 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-11T12:44:29,585 INFO [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-11-11T12:44:29,585 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-11-11T12:44:29,585 DEBUG [RS_CLOSE_META-regionserver/32e78532c8b1:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-11T12:44:29,683 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1250): stopping server 32e78532c8b1,44673,1731328897232; all regions closed. 
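Earlier in the procedure the table's region row and table-state row were deleted from hbase:meta, and the meta flush just above persists those edits. A hedged sketch of checking the result with an ordinary client scan of hbase:meta follows; the row-prefix convention ("<tableName>,") matches the region rows quoted in the log, but the query itself is an illustration, not code from this test.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaScanSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      // Region rows for a table start with "<tableName>," in hbase:meta.
      Scan scan = new Scan();
      scan.setRowPrefixFilter(Bytes.toBytes("TestAcidGuarantees,"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          System.out.println(Bytes.toString(r.getRow())); // prints nothing once the delete is done
        }
      }
    }
  }
}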
2024-11-11T12:44:29,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741834_1010 (size=26050) 2024-11-11T12:44:29,687 WARN [Close-WAL-Writer-0 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(725): complete file /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/WALs/32e78532c8b1,44673,1731328897232/32e78532c8b1%2C44673%2C1731328897232.meta.1731328899895.meta not finished, retry = 0 2024-11-11T12:44:29,790 DEBUG [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/oldWALs 2024-11-11T12:44:29,790 INFO [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 32e78532c8b1%2C44673%2C1731328897232.meta:.meta(num 1731328899895) 2024-11-11T12:44:29,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741833_1009 (size=15821880) 2024-11-11T12:44:29,794 DEBUG [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/oldWALs 2024-11-11T12:44:29,794 INFO [RS:0;32e78532c8b1:44673 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 32e78532c8b1%2C44673%2C1731328897232:(num 1731328899341) 2024-11-11T12:44:29,794 DEBUG [RS:0;32e78532c8b1:44673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:29,794 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.LeaseManager(133): Closed leases 2024-11-11T12:44:29,795 INFO [RS:0;32e78532c8b1:44673 {}] hbase.ChoreService(370): Chore service for: regionserver/32e78532c8b1:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-11T12:44:29,795 INFO [regionserver/32e78532c8b1:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-11-11T12:44:29,795 INFO [RS:0;32e78532c8b1:44673 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:44673 2024-11-11T12:44:29,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-11T12:44:29,799 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/32e78532c8b1,44673,1731328897232 2024-11-11T12:44:29,801 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [32e78532c8b1,44673,1731328897232] 2024-11-11T12:44:29,801 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 32e78532c8b1,44673,1731328897232; numProcessing=1 2024-11-11T12:44:29,802 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/32e78532c8b1,44673,1731328897232 already deleted, retry=false 2024-11-11T12:44:29,803 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 32e78532c8b1,44673,1731328897232 expired; onlineServers=0 2024-11-11T12:44:29,803 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '32e78532c8b1,40877,1731328896051' ***** 2024-11-11T12:44:29,803 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-11T12:44:29,803 DEBUG [M:0;32e78532c8b1:40877 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b2972ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=32e78532c8b1/172.17.0.3:0 2024-11-11T12:44:29,803 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegionServer(1224): stopping server 32e78532c8b1,40877,1731328896051 2024-11-11T12:44:29,803 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegionServer(1250): stopping server 32e78532c8b1,40877,1731328896051; all regions closed. 2024-11-11T12:44:29,803 DEBUG [M:0;32e78532c8b1:40877 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-11T12:44:29,803 DEBUG [M:0;32e78532c8b1:40877 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-11T12:44:29,803 DEBUG [M:0;32e78532c8b1:40877 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-11T12:44:29,803 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
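The ZKWatcher entries show how the master learns of the region server's exit: the server's ephemeral znode under /hbase/rs disappears, the watcher fires a NodeDeleted event, and RegionServerTracker turns that into expiration handling. A minimal sketch of the same watch pattern with the plain ZooKeeper client, using a hypothetical quorum address and znode path:

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ZnodeDeletionWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    CountDownLatch deleted = new CountDownLatch(1);

    // Hypothetical quorum address; the test cluster in this log used 127.0.0.1:54294.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    String znode = "/hbase/rs/example-server,16020,0"; // hypothetical ephemeral node path
    Watcher deletionWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        deleted.countDown(); // the ephemeral node vanished: treat the server as gone
      }
    };
    if (zk.exists(znode, deletionWatcher) == null) {
      deleted.countDown(); // node already absent
    }
    deleted.await();
    zk.close();
  }
}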
2024-11-11T12:44:29,803 DEBUG [master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.small.0-1731328898936 {}] cleaner.HFileCleaner(306): Exit Thread[master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.small.0-1731328898936,5,FailOnTimeoutGroup] 2024-11-11T12:44:29,803 DEBUG [master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.large.0-1731328898934 {}] cleaner.HFileCleaner(306): Exit Thread[master/32e78532c8b1:0:becomeActiveMaster-HFileCleaner.large.0-1731328898934,5,FailOnTimeoutGroup] 2024-11-11T12:44:29,804 INFO [M:0;32e78532c8b1:40877 {}] hbase.ChoreService(370): Chore service for: master/32e78532c8b1:0 had [] on shutdown 2024-11-11T12:44:29,804 DEBUG [M:0;32e78532c8b1:40877 {}] master.HMaster(1733): Stopping service threads 2024-11-11T12:44:29,804 INFO [M:0;32e78532c8b1:40877 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-11T12:44:29,804 ERROR [M:0;32e78532c8b1:40877 {}] procedure2.ProcedureExecutor(722): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[HFileArchiver-5,5,PEWorkerGroup] Thread[IPC Client (59733779) connection to localhost/127.0.0.1:42421 from jenkins,5,PEWorkerGroup] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:42421,5,PEWorkerGroup] Thread[HFileArchiver-6,5,PEWorkerGroup] 2024-11-11T12:44:29,805 INFO [M:0;32e78532c8b1:40877 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-11T12:44:29,805 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-11T12:44:29,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-11T12:44:29,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-11T12:44:29,805 DEBUG [M:0;32e78532c8b1:40877 {}] zookeeper.ZKUtil(347): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-11T12:44:29,805 WARN [M:0;32e78532c8b1:40877 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-11T12:44:29,805 INFO [M:0;32e78532c8b1:40877 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-11-11T12:44:29,805 INFO [M:0;32e78532c8b1:40877 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-11T12:44:29,806 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-11T12:44:29,806 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:44:29,806 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-11T12:44:29,806 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-11T12:44:29,806 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:44:29,806 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=768.14 KB heapSize=944.23 KB 2024-11-11T12:44:29,808 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-11T12:44:29,825 DEBUG [M:0;32e78532c8b1:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4856e0c5f7e445d917a240123116e6e is 82, key is hbase:meta,,1/info:regioninfo/1731328900072/Put/seqid=0 2024-11-11T12:44:29,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742528_1704 (size=5672) 2024-11-11T12:44:29,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T12:44:29,901 INFO [RS:0;32e78532c8b1:44673 {}] regionserver.HRegionServer(1307): Exiting; stopping=32e78532c8b1,44673,1731328897232; zookeeper connection closed. 2024-11-11T12:44:29,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44673-0x1019759ddb90001, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T12:44:29,902 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2d62ec2a {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2d62ec2a 2024-11-11T12:44:29,902 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-11T12:44:30,230 INFO [M:0;32e78532c8b1:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=2167 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4856e0c5f7e445d917a240123116e6e 2024-11-11T12:44:30,256 DEBUG [M:0;32e78532c8b1:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/effb31d9fbf849629ca0b0ec280a93a3 is 2278, key is \x00\x00\x00\x00\x00\x00\x00\x9A/proc:d/1731329044734/Put/seqid=0 2024-11-11T12:44:30,264 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742529_1705 (size=45365) 2024-11-11T12:44:30,264 INFO [M:0;32e78532c8b1:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=767.58 KB at sequenceid=2167 (bloomFilter=true), 
to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/effb31d9fbf849629ca0b0ec280a93a3 2024-11-11T12:44:30,267 INFO [M:0;32e78532c8b1:40877 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for effb31d9fbf849629ca0b0ec280a93a3 2024-11-11T12:44:30,284 DEBUG [M:0;32e78532c8b1:40877 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b3233da15b6430f9bc3cdb026d2f58a is 69, key is 32e78532c8b1,44673,1731328897232/rs:state/1731328899040/Put/seqid=0 2024-11-11T12:44:30,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073742530_1706 (size=5156) 2024-11-11T12:44:30,687 INFO [M:0;32e78532c8b1:40877 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=2167 (bloomFilter=true), to=hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b3233da15b6430f9bc3cdb026d2f58a 2024-11-11T12:44:30,691 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/f4856e0c5f7e445d917a240123116e6e as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f4856e0c5f7e445d917a240123116e6e 2024-11-11T12:44:30,694 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/f4856e0c5f7e445d917a240123116e6e, entries=8, sequenceid=2167, filesize=5.5 K 2024-11-11T12:44:30,695 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/effb31d9fbf849629ca0b0ec280a93a3 as hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/effb31d9fbf849629ca0b0ec280a93a3 2024-11-11T12:44:30,698 INFO [M:0;32e78532c8b1:40877 {}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for effb31d9fbf849629ca0b0ec280a93a3 2024-11-11T12:44:30,698 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/effb31d9fbf849629ca0b0ec280a93a3, entries=181, sequenceid=2167, filesize=44.3 K 2024-11-11T12:44:30,699 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2b3233da15b6430f9bc3cdb026d2f58a as 
hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b3233da15b6430f9bc3cdb026d2f58a 2024-11-11T12:44:30,702 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:42421/user/jenkins/test-data/f87a26b0-00da-6be3-0e56-b7d2e2016b18/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2b3233da15b6430f9bc3cdb026d2f58a, entries=1, sequenceid=2167, filesize=5.0 K 2024-11-11T12:44:30,702 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(3040): Finished flush of dataSize ~768.14 KB/786575, heapSize ~943.94 KB/966592, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 896ms, sequenceid=2167, compaction requested=false 2024-11-11T12:44:30,705 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-11T12:44:30,705 DEBUG [M:0;32e78532c8b1:40877 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-11-11T12:44:30,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44919 is added to blk_1073741830_1006 (size=928677) 2024-11-11T12:44:30,710 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-11-11T12:44:30,710 INFO [M:0;32e78532c8b1:40877 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down. 2024-11-11T12:44:30,710 INFO [M:0;32e78532c8b1:40877 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.3:40877 2024-11-11T12:44:30,711 DEBUG [M:0;32e78532c8b1:40877 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/32e78532c8b1,40877,1731328896051 already deleted, retry=false 2024-11-11T12:44:30,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T12:44:30,813 INFO [M:0;32e78532c8b1:40877 {}] regionserver.HRegionServer(1307): Exiting; stopping=32e78532c8b1,40877,1731328896051; zookeeper connection closed. 
2024-11-11T12:44:30,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40877-0x1019759ddb90000, quorum=127.0.0.1:54294, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-11T12:44:30,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1f79ec76{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-11T12:44:30,821 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@576ebda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T12:44:30,821 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T12:44:30,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4727fac8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T12:44:30,821 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47db50b9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.log.dir/,STOPPED} 2024-11-11T12:44:30,826 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-11T12:44:30,826 WARN [BP-304736114-172.17.0.3-1731328892201 heartbeating to localhost/127.0.0.1:42421 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-11T12:44:30,826 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-11T12:44:30,826 WARN [BP-304736114-172.17.0.3-1731328892201 heartbeating to localhost/127.0.0.1:42421 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-304736114-172.17.0.3-1731328892201 (Datanode Uuid b74d14b0-0b0d-4c6b-bf00-ddaeb3d8be59) service to localhost/127.0.0.1:42421 2024-11-11T12:44:30,828 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data1/current/BP-304736114-172.17.0.3-1731328892201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T12:44:30,828 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/cluster_04c40d3e-47b4-9d2d-23e5-c4dee1cc605f/dfs/data/data2/current/BP-304736114-172.17.0.3-1731328892201 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-11T12:44:30,829 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-11T12:44:30,836 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@b03fcff{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-11T12:44:30,837 INFO 
[Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e0a3ea0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-11T12:44:30,837 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-11T12:44:30,837 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62a9beb2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-11T12:44:30,838 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8167a4c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-server/target/test-data/149b802a-a295-d749-cd74-17f48f7665b8/hadoop.log.dir/,STOPPED} 2024-11-11T12:44:30,856 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers 2024-11-11T12:44:31,035 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down
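For context, the "Shutting down minicluster" and "Minicluster is down" lines bracket the standard HBaseTestingUtility lifecycle used by tests of this kind. A hedged outline of how such a test typically drives it (JUnit wiring, the ACID workload, and MOB configuration omitted; the column-family names mirror the A/B/C families seen in the log):

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MiniClusterLifecycleSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster();                       // spins up ZK, HDFS and a master plus region server
    try {
      Table t = util.createTable(TableName.valueOf("TestAcidGuarantees"),
          new byte[][] { Bytes.toBytes("A"), Bytes.toBytes("B"), Bytes.toBytes("C") });
      // ... exercise the table here ...
      util.deleteTable(TableName.valueOf("TestAcidGuarantees"));
    } finally {
      util.shutdownMiniCluster();                  // produces the shutdown sequence logged above
    }
  }
}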